forked from dmlc/xgboost
Histogram Optimized Tree Grower (dmlc#1940)
* Support histogram-based algorithm + multiple tree growing strategies
* Add a brand new updater to support the histogram-based algorithm, which buckets continuous features into discrete bins to speed up training. To use it, set `tree_method = fast_hist` in the configuration.
* Support multiple tree growing strategies. For now, two policies are supported:
  * `grow_policy=depthwise` (default): favor splitting at nodes closest to the root, i.e. grow depth-wise.
  * `grow_policy=lossguide`: favor splitting at nodes with the highest loss change.
* Improve single-threaded performance
  * Unroll critical loops
  * Introduce specialized code for dense data (i.e. no missing values)
* Additional training parameters: `max_leaves`, `max_bin`, `grow_policy`, `verbose`
* Add a small test for the hist method
* Fix memory error in row_set.h: when a std::vector is resized, a reference to one of its elements may become stale. Any such reference must be updated as well.
* Resolve cross-platform compilation issues
  * Versions of g++ older than 4.8 lack support for a few C++11 features, e.g. alignas(*) and the new initializer syntax. To support g++ 4.6, use pre-C++11 initializers and remove alignas(*).
  * Versions of MSVC older than 2015 do not support alignas(*). To support MSVC 2012, remove alignas(*).
  * For g++ 4.8 and newer, alignas(*) is enabled for its performance benefits.
  * Some old compilers (MSVC 2012, g++ 4.6) do not support template aliases (which use `using` to declare type aliases), so always use `typedef`.
* Fix a host of CI issues
  * Remove dependency on libz on OS X
  * Fix heading for hist_util
  * Fix minor style issues
  * Add missing #include
  * Remove extraneous logging
* Enable tree_method=hist in R
* Rename HistMaker to GHistBuilder to avoid confusion
* Fix R integration
* Respond to style comments
* Consistent tie-breaking for the priority queue using timestamps
* Last-minute style fixes
* Fix issuecomment-271977647: the way we quantize data is broken. The agaricus data consists entirely of categorical values. When NAs are converted into 0's, `HistCutMatrix::Init` assigns both 0's and 1's to the same single bin. Why? gmat contains only the smallest value (0) and an upper bound (2), which is twice the maximum value (1). Add the maximum value itself to gmat to fix the issue.
* Fix issuecomment-272266358
  * Remove padding from cut values for the continuous case
  * For categorical/ordinal values, use midpoints as bin boundaries to be safe
* Fix CI issue -- do not use xrange(*)
* Fix corner case in quantile sketch

Signed-off-by: Philip Cho <[email protected]>

* Add a test for an edge case in the quantile sketcher: max_bin=2 used to cause an exception.
* Fix fast_hist test: the test used to require a strictly increasing Test AUC for all examples. One of them exhibits a small blip in Test AUC before reaching a Test AUC of 1 (see below). Solution: do not require a monotonic increase for this particular example.

  [0] train-auc:0.99989  test-auc:0.999497
  [1] train-auc:1  test-auc:0.999749
  [2] train-auc:1  test-auc:0.999749
  [3] train-auc:1  test-auc:0.999749
  [4] train-auc:1  test-auc:0.999749
  [5] train-auc:1  test-auc:0.999497
  [6] train-auc:1  test-auc:1
  [7] train-auc:1  test-auc:1
  [8] train-auc:1  test-auc:1
  [9] train-auc:1  test-auc:1
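For context, here is a minimal sketch of how the new training parameters described above could be used from the Python interface. The data and parameter values are hypothetical and only for illustration; the commit text refers to the method as `fast_hist`, while later xgboost releases expose it as `tree_method='hist'`.

```python
import numpy as np
import xgboost as xgb

# Hypothetical synthetic binary-classification data, for illustration only.
X = np.random.rand(1000, 10)
y = (X[:, 0] + X[:, 1] > 1.0).astype(int)
dtrain = xgb.DMatrix(X, label=y)

params = {
    'objective': 'binary:logistic',
    'tree_method': 'hist',       # histogram-based grower ('fast_hist' in this commit's wording)
    'grow_policy': 'lossguide',  # split at the node with the highest loss change
    'max_leaves': 63,            # leaf cap; mainly relevant under lossguide growth
    'max_bin': 256,              # number of discrete bins per feature
    'eta': 0.1,
}
bst = xgb.train(params, dtrain, num_boost_round=50)
```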
Showing 13 changed files with 1,509 additions and 31 deletions.
@@ -0,0 +1,227 @@
/*!
 * Copyright 2017 by Contributors
 * \file hist_util.h
 * \brief Utilities to store histograms
 * \author Philip Cho, Tianqi Chen
 */
#include <dmlc/omp.h>
#include <vector>
#include "./sync.h"
#include "./hist_util.h"
#include "./quantile.h"

namespace xgboost {
namespace common {

void HistCutMatrix::Init(DMatrix* p_fmat, size_t max_num_bins) {
  typedef common::WXQuantileSketch<bst_float, bst_float> WXQSketch;
  const MetaInfo& info = p_fmat->info();

  // safe factor for better accuracy
  const int kFactor = 8;
  std::vector<WXQSketch> sketchs;

  int nthread;
#pragma omp parallel
  {
    nthread = omp_get_num_threads();
  }
  nthread = std::max(nthread / 2, 1);

  unsigned nstep = (info.num_col + nthread - 1) / nthread;
  unsigned ncol = static_cast<unsigned>(info.num_col);
  sketchs.resize(info.num_col);
  for (auto& s : sketchs) {
    s.Init(info.num_row, 1.0 / (max_num_bins * kFactor));
  }

  dmlc::DataIter<RowBatch>* iter = p_fmat->RowIterator();
  iter->BeforeFirst();
  while (iter->Next()) {
    const RowBatch& batch = iter->Value();
#pragma omp parallel num_threads(nthread)
    {
      CHECK_EQ(nthread, omp_get_num_threads());
      unsigned tid = static_cast<unsigned>(omp_get_thread_num());
      unsigned begin = std::min(nstep * tid, ncol);
      unsigned end = std::min(nstep * (tid + 1), ncol);
      for (size_t i = 0; i < batch.size; ++i) {  // NOLINT(*)
        bst_uint ridx = static_cast<bst_uint>(batch.base_rowid + i);
        RowBatch::Inst inst = batch[i];
        for (bst_uint j = 0; j < inst.length; ++j) {
          if (inst[j].index >= begin && inst[j].index < end) {
            sketchs[inst[j].index].Push(inst[j].fvalue, info.GetWeight(ridx));
          }
        }
      }
    }
  }

  // gather the histogram data
  rabit::SerializeReducer<WXQSketch::SummaryContainer> sreducer;
  std::vector<WXQSketch::SummaryContainer> summary_array;
  summary_array.resize(sketchs.size());
  for (size_t i = 0; i < sketchs.size(); ++i) {
    WXQSketch::SummaryContainer out;
    sketchs[i].GetSummary(&out);
    summary_array[i].Reserve(max_num_bins * kFactor);
    summary_array[i].SetPrune(out, max_num_bins * kFactor);
  }
  size_t nbytes = WXQSketch::SummaryContainer::CalcMemCost(max_num_bins * kFactor);
  sreducer.Allreduce(dmlc::BeginPtr(summary_array), nbytes, summary_array.size());

  this->min_val.resize(info.num_col);
  row_ptr.push_back(0);
  for (size_t fid = 0; fid < summary_array.size(); ++fid) {
    WXQSketch::SummaryContainer a;
    a.Reserve(max_num_bins);
    a.SetPrune(summary_array[fid], max_num_bins);
    const bst_float mval = a.data[0].value;
    this->min_val[fid] = mval - fabs(mval);
    if (a.size > 1 && a.size <= 16) {
      /* specialized code for categorical / ordinal data -- use midpoints */
      for (size_t i = 1; i < a.size; ++i) {
        bst_float cpt = (a.data[i].value + a.data[i - 1].value) / 2.0;
        if (i == 1 || cpt > cut.back()) {
          cut.push_back(cpt);
        }
      }
    } else {
      for (size_t i = 2; i < a.size; ++i) {
        bst_float cpt = a.data[i - 1].value;
        if (i == 2 || cpt > cut.back()) {
          cut.push_back(cpt);
        }
      }
    }
    // push a value that is greater than anything
    if (a.size != 0) {
      bst_float cpt = a.data[a.size - 1].value;
      // this must be bigger than the last value in the scale
      bst_float last = cpt + fabs(cpt);
      cut.push_back(last);
    }
    row_ptr.push_back(cut.size());
  }
}

void GHistIndexMatrix::Init(DMatrix* p_fmat) {
  CHECK(cut != nullptr);
  dmlc::DataIter<RowBatch>* iter = p_fmat->RowIterator();
  hit_count.resize(cut->row_ptr.back(), 0);

  int nthread;
#pragma omp parallel
  {
    nthread = omp_get_num_threads();
  }
  nthread = std::max(nthread / 2, 1);

  iter->BeforeFirst();
  row_ptr.push_back(0);
  while (iter->Next()) {
    const RowBatch& batch = iter->Value();
    size_t rbegin = row_ptr.size() - 1;
    for (size_t i = 0; i < batch.size; ++i) {
      row_ptr.push_back(batch[i].length + row_ptr.back());
    }
    index.resize(row_ptr.back());

    CHECK_GT(cut->cut.size(), 0);
    CHECK_EQ(cut->row_ptr.back(), cut->cut.size());

    omp_ulong bsize = static_cast<omp_ulong>(batch.size);
#pragma omp parallel for num_threads(nthread) schedule(static)
    for (omp_ulong i = 0; i < bsize; ++i) {  // NOLINT(*)
      size_t ibegin = row_ptr[rbegin + i];
      size_t iend = row_ptr[rbegin + i + 1];
      RowBatch::Inst inst = batch[i];
      CHECK_EQ(ibegin + inst.length, iend);
      for (bst_uint j = 0; j < inst.length; ++j) {
        unsigned fid = inst[j].index;
        auto cbegin = cut->cut.begin() + cut->row_ptr[fid];
        auto cend = cut->cut.begin() + cut->row_ptr[fid + 1];
        CHECK(cbegin != cend);
        auto it = std::upper_bound(cbegin, cend, inst[j].fvalue);
        if (it == cend) it = cend - 1;
        unsigned idx = static_cast<unsigned>(it - cut->cut.begin());
        index[ibegin + j] = idx;
      }
      std::sort(index.begin() + ibegin, index.begin() + iend);
    }
  }
}

void GHistBuilder::BuildHist(const std::vector<bst_gpair>& gpair,
                             const RowSetCollection::Elem row_indices,
                             const GHistIndexMatrix& gmat,
                             GHistRow hist) {
  CHECK(!data_.empty()) << "GHistBuilder must be initialized";
  CHECK_EQ(data_.size(), nbins_ * nthread_) << "invalid dimensions for temp buffer";

  std::fill(data_.begin(), data_.end(), GHistEntry());

  const int K = 8;  // loop unrolling factor
  const bst_omp_uint nthread = static_cast<bst_omp_uint>(this->nthread_);
  const bst_omp_uint nrows = row_indices.end - row_indices.begin;
  const bst_omp_uint rest = nrows % K;

#pragma omp parallel for num_threads(nthread) schedule(static)
  for (bst_omp_uint i = 0; i < nrows - rest; i += K) {
    const bst_omp_uint tid = omp_get_thread_num();
    const size_t off = tid * nbins_;
    bst_uint rid[K];
    bst_gpair stat[K];
    size_t ibegin[K], iend[K];
    for (int k = 0; k < K; ++k) {
      rid[k] = row_indices.begin[i + k];
    }
    for (int k = 0; k < K; ++k) {
      stat[k] = gpair[rid[k]];
    }
    for (int k = 0; k < K; ++k) {
      ibegin[k] = static_cast<size_t>(gmat.row_ptr[rid[k]]);
      iend[k] = static_cast<size_t>(gmat.row_ptr[rid[k] + 1]);
    }
    for (int k = 0; k < K; ++k) {
      for (size_t j = ibegin[k]; j < iend[k]; ++j) {
        const size_t bin = gmat.index[j];
        data_[off + bin].Add(stat[k]);
      }
    }
  }
  for (bst_omp_uint i = nrows - rest; i < nrows; ++i) {
    const bst_uint rid = row_indices.begin[i];
    const bst_gpair stat = gpair[rid];
    const size_t ibegin = static_cast<size_t>(gmat.row_ptr[rid]);
    const size_t iend = static_cast<size_t>(gmat.row_ptr[rid + 1]);
    for (size_t j = ibegin; j < iend; ++j) {
      const size_t bin = gmat.index[j];
      data_[bin].Add(stat);
    }
  }

  /* reduction */
  const bst_omp_uint nbins = static_cast<bst_omp_uint>(nbins_);
#pragma omp parallel for num_threads(nthread) schedule(static)
  for (bst_omp_uint bin_id = 0; bin_id < nbins; ++bin_id) {
    for (bst_omp_uint tid = 0; tid < nthread; ++tid) {
      hist.begin[bin_id].Add(data_[tid * nbins_ + bin_id]);
    }
  }
}

void GHistBuilder::SubtractionTrick(GHistRow self,
                                    GHistRow sibling,
                                    GHistRow parent) {
  const bst_omp_uint nthread = static_cast<bst_omp_uint>(this->nthread_);
  const bst_omp_uint nbins = static_cast<bst_omp_uint>(nbins_);
#pragma omp parallel for num_threads(nthread) schedule(static)
  for (bst_omp_uint bin_id = 0; bin_id < nbins; ++bin_id) {
    self.begin[bin_id].SetSubtract(parent.begin[bin_id], sibling.begin[bin_id]);
  }
}

}  // namespace common
}  // namespace xgboost
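As a side note on GHistBuilder::SubtractionTrick above: a node's gradient histogram can be obtained by subtracting its sibling's histogram from the parent's, so only the smaller child needs a full BuildHist pass. A minimal numpy sketch of that arithmetic (array names and values are illustrative only, not part of the xgboost API):

```python
import numpy as np

# Per-bin (grad, hess) sums for a parent node and one of its children.
parent_hist  = np.array([[10.0, 4.0], [6.0, 2.0], [3.0, 1.0]])
sibling_hist = np.array([[ 4.0, 1.5], [2.0, 0.5], [1.0, 0.5]])

# The subtraction trick: the other child's histogram comes for free.
self_hist = parent_hist - sibling_hist
print(self_hist)
```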