diff --git a/lm/kenlm_benchmark_main.cc b/lm/kenlm_benchmark_main.cc
index 1704ec6c9..0fc687a66 100644
--- a/lm/kenlm_benchmark_main.cc
+++ b/lm/kenlm_benchmark_main.cc
@@ -31,9 +31,16 @@ template <class Model, class Width> void QueryFromBytes(const Model &model, int
   Width kEOS = model.GetVocabulary().EndSentence();
   Width buf[4096];
   float sum = 0.0;
+  uint64_t completed = 0;
+
+  double loaded = util::CPUTime();
+  std::cout << "After loading: ";
+  util::PrintUsage(std::cout);
+
   while (std::size_t got = util::ReadOrEOF(fd_in, buf, sizeof(buf))) {
     UTIL_THROW_IF2(got % sizeof(Width), "File size not a multiple of vocab id size " << sizeof(Width));
     got /= sizeof(Width);
+    completed += got;
     // Do even stuff first.
     const Width *even_end = buf + (got & ~1);
     // Alternating states
@@ -50,7 +57,9 @@ template <class Model, class Width> void QueryFromBytes(const Model &model, int
       next_state = (*i++ == kEOS) ? begin_state : &state[2];
     }
   }
-  std::cout << "Sum is " << sum << std::endl;
+  std::cerr << "Probability sum is " << sum << std::endl;
+
+  std::cout << "CPU_excluding_load:" << (util::CPUTime() - loaded) << " CPU_per_query:" << ((util::CPUTime() - loaded) / static_cast<double>(completed)) << std::endl;
 }
 
 template <class Model, class Width> void DispatchFunction(const Model &model, bool query) {
@@ -62,7 +71,10 @@ template <class Model> void DispatchWidth(const char *file, bool query) {
-  Model model(file);
+  lm::ngram::Config config;
+  config.load_method = util::READ;
+  std::cerr << "Using load_method = READ." << std::endl;
+  Model model(file, config);
   lm::WordIndex bound = model.GetVocabulary().Bound();
   if (bound <= 256) {
     DispatchFunction<Model, uint8_t>(model, query);
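
For readers who want to reproduce the measurement outside kenlm, here is a minimal standalone sketch of the timing pattern this patch introduces: snapshot process CPU time once the model is loaded, count every scored word, and report total and per-query CPU afterward, so load cost is excluded from the query numbers. util::CPUTime() and util::PrintUsage() are kenlm's helpers in util/usage.hh; the CPUSeconds() stand-in and the stubbed query loop below are illustrative, not part of the patch.

// Sketch of the patch's timing pattern using only the standard library.
#include <cstdint>
#include <ctime>
#include <iostream>

// Stand-in for util::CPUTime(): process CPU seconds consumed so far.
static double CPUSeconds() {
  return static_cast<double>(std::clock()) / CLOCKS_PER_SEC;
}

int main() {
  // ... model construction would happen before this point ...
  const double loaded = CPUSeconds();  // snapshot: everything earlier counts as load

  uint64_t completed = 0;
  for (int i = 0; i < 1000000; ++i) {
    // ... one FullScore() call per word would go here ...
    ++completed;
  }

  const double spent = CPUSeconds() - loaded;
  // Same report format as the patch: total CPU after load, then per query.
  std::cout << "CPU_excluding_load:" << spent
            << " CPU_per_query:" << (spent / static_cast<double>(completed))
            << std::endl;
  return 0;
}

Note that completed counts vocab ids read from the input, so CPU_per_query is CPU time per FullScore() call. Forcing load_method = util::READ also means the binary file is read into memory up front rather than lazily mmapped, so page faults do not leak into the post-load numbers.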