Replace LOG_XXX with LOG_FMT_XXX (pingcap#4480)
Lloyd-Pottiger authored Mar 29, 2022
1 parent 6899e2f commit 5b00cc6
Showing 14 changed files with 144 additions and 129 deletions.
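
Every change below follows the same mechanical pattern: a stream-style LOG_XXX call that splices its message together with operator<< becomes a LOG_FMT_XXX call taking a fmt format string plus an argument list (or an existing LOG_FMT_XXX call is reformatted). The macro definitions themselves are not part of this diff; the following is only a minimal sketch of the two styles, assuming Poco::Logger and fmt underneath, with hypothetical *_SKETCH names rather than the repository's real logger_useful.h definitions:

#include <Poco/Logger.h>
#include <fmt/core.h>
#include <sstream>

// Stream style (old): build the message with operator<< into an
// ostringstream, then hand the finished string to Poco::Logger.
#define LOG_INFO_SKETCH(logger, message)       \
    do                                         \
    {                                          \
        if ((logger)->information())           \
        {                                      \
            std::ostringstream oss_;           \
            oss_ << message;                   \
            (logger)->information(oss_.str()); \
        }                                      \
    } while (false)

// fmt style (new): a format string with {} placeholders and a plain
// argument list, rendered only when the log level is enabled.
#define LOG_FMT_INFO_SKETCH(logger, fmt_str, ...)                       \
    do                                                                  \
    {                                                                   \
        if ((logger)->information())                                    \
            (logger)->information(fmt::format(fmt_str, ##__VA_ARGS__)); \
    } while (false)

Under this sketch, LOG_INFO_SKETCH(log, "took " << sec << " sec.") becomes LOG_FMT_INFO_SKETCH(log, "took {} sec.", sec): no ostringstream, and precision moves into the format spec (e.g. {:.2f}) instead of iostream manipulators.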
2 changes: 1 addition & 1 deletion dbms/src/Client/ConnectionPoolWithFailover.cpp
@@ -55,7 +55,7 @@ ConnectionPoolWithFailover::ConnectionPoolWithFailover(
}
}

-IConnectionPool::Entry ConnectionPoolWithFailover::get(const Settings * settings, bool /*force_connected*/)
+IConnectionPool::Entry ConnectionPoolWithFailover::get(const Settings * settings, bool /*force_connected*/) // NOLINT
{
TryGetEntryFunc try_get_entry = [&](NestedPool & pool, std::string & fail_message) {
return tryGetEntry(pool, fail_message, settings);
11 changes: 5 additions & 6 deletions dbms/src/Common/MyTime.cpp
@@ -2172,12 +2172,11 @@ std::optional<UInt64> MyDateTimeParser::parseAsPackedUInt(const StringRef & str_
if (!f(ctx, my_time))
{
#ifndef NDEBUG
-LOG_FMT_TRACE(
-&Poco::Logger::get("MyDateTimeParser"),
-"parse error, [str={}] [format={}] [parse_pos={}]",
-ctx.view.toString(),
-format,
-ctx.pos);
+LOG_FMT_TRACE(&Poco::Logger::get("MyDateTimeParser"),
+"parse error, [str={}] [format={}] [parse_pos={}]",
+ctx.view.toString(),
+format,
+ctx.pos);
#endif
return std::nullopt;
}
53 changes: 28 additions & 25 deletions dbms/src/Common/tests/bench_unified_log_formatter.cpp
@@ -104,7 +104,7 @@ BENCHMARK_DEFINE_F(UnifiedLogBM, ShortOldStream)
{
for (size_t i = 0; i < num_repeat; ++i)
{
LOG_INFO(log, " GC exit within " << elapsed_sec << " sec.");
LOG_FMT_INFO(log, " GC exit within {} sec.", elapsed_sec);
}
}
}
@@ -118,7 +118,7 @@ BENCHMARK_DEFINE_F(UnifiedLogBM, ShortOldFmt)
{
for (size_t i = 0; i < num_repeat; ++i)
{
-LOG_INFO(log, fmt::format(" GC exit within {} sec.", elapsed_sec));
+LOG_FMT_INFO(log, " GC exit within {} sec.", elapsed_sec);
}
}
}
@@ -146,15 +146,20 @@ BENCHMARK_DEFINE_F(UnifiedLogBM, LoogOldStream)
{
for (size_t i = 0; i < num_repeat; ++i)
{
-LOG_INFO(
+LOG_FMT_INFO(
log,
" GC exit within " << std::setprecision(2) << elapsed_sec << " sec. PageFiles from " //
<< beg.first << "_" << beg.second << " to "
<< end.first << "_" << end.second //
<< ", min writing " << min.first << "_" << min.second
<< ", num files: " << num_files << ", num legacy:" << num_legacy
<< ", compact legacy archive files: " << num_compact
<< ", remove data files: " << num_removed);
" GC exit within {:.2f} sec. PageFiles from {}_{} to {}_{}, min writing {}_{}, num files: {}, num legacy:{}, compact legacy archive files: {}, remove data files: {}",
elapsed_sec,
beg.first,
beg.second,
end.first,
end.second,
min.first,
min.second,
num_files,
num_legacy,
num_compact,
num_removed);
}
}
}
@@ -168,22 +173,20 @@ BENCHMARK_DEFINE_F(UnifiedLogBM, LoogOldFmt)
{
for (size_t i = 0; i < num_repeat; ++i)
{
-LOG_INFO(
+LOG_FMT_INFO(
log,
-fmt::format(
-" GC exit within {:.2f} sec. PageFiles from {}_{} to {}_{}, min writing {}_{}"
-", num files: {}, num legacy:{}, compact legacy archive files: {}, remove data files: {}",
-elapsed_sec,
-beg.first,
-beg.second,
-end.first,
-end.second,
-min.first,
-min.second,
-num_files,
-num_legacy,
-num_compact,
-num_removed));
+"GC exit within {:.2f} sec. PageFiles from {}_{} to {}_{}, min writing {}_{}, num files: {}, num legacy:{}, compact legacy archive files: {}, remove data files: {}",
+elapsed_sec,
+beg.first,
+beg.second,
+end.first,
+end.second,
+min.first,
+min.second,
+num_files,
+num_legacy,
+num_compact,
+num_removed);
}
}
}
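
The two long-message benchmarks above also show precision handling moving from iostream manipulators (std::fixed with std::setprecision) into fmt format specs. A small standalone example of how the specs used in this file behave, relying only on fmt's documented semantics rather than code from this repository:

#include <fmt/core.h>
#include <iostream>

int main()
{
    double elapsed_sec = 12.34567;
    // {} prints the shortest round-trip representation of the double.
    std::cout << fmt::format(" GC exit within {} sec.", elapsed_sec) << '\n';     // 12.34567
    // {:.2f} is fixed-point with 2 digits, like std::fixed << std::setprecision(2).
    std::cout << fmt::format(" GC exit within {:.2f} sec.", elapsed_sec) << '\n'; // 12.35
    // {:.3f} matches the 3-digit setprecision used by the aggregation logs below.
    std::cout << fmt::format("in {:.3f} sec.", elapsed_sec) << '\n';              // 12.346
    return 0;
}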
11 changes: 5 additions & 6 deletions dbms/src/DataStreams/AggregatingBlockInputStream.cpp
@@ -73,12 +73,11 @@ Block AggregatingBlockInputStream::readImpl()
input_streams.emplace_back(temporary_inputs.back()->block_in);
}

-LOG_FMT_TRACE(
-log,
-"Will merge {} temporary files of size {} MiB compressed, {} MiB uncompressed.",
-files.files.size(),
-(files.sum_size_compressed / 1048576.0),
-(files.sum_size_uncompressed / 1048576.0));
+LOG_FMT_TRACE(log,
+"Will merge {} temporary files of size {:.2f} MiB compressed, {:.2f} MiB uncompressed.",
+files.files.size(),
+(files.sum_size_compressed / 1048576.0),
+(files.sum_size_uncompressed / 1048576.0));

impl = std::make_unique<MergingAggregatedMemoryEfficientBlockInputStream>(input_streams, params, final, 1, 1, log->identifier());
}
13 changes: 8 additions & 5 deletions dbms/src/DataStreams/ColumnGathererStream.cpp
@@ -133,12 +133,15 @@ void ColumnGathererStream::readSuffixImpl()
return;

double seconds = profile_info.total_stopwatch.elapsedSeconds();
-std::stringstream speed;
+String speed;
if (seconds)
speed << ", " << profile_info.rows / seconds << " rows/sec., "
<< profile_info.bytes / 1048576.0 / seconds << " MiB/sec.";
LOG_TRACE(log, std::fixed << std::setprecision(2) << "Gathered column " << name << " (" << static_cast<double>(profile_info.bytes) / profile_info.rows << " bytes/elem.)"
<< " in " << seconds << " sec." << speed.str());
speed = fmt::format(", {:.2f} rows/sec., {:.2f} MiB/sec.", profile_info.rows / seconds, profile_info.bytes / 1048576.0 / seconds);
LOG_FMT_TRACE(log,
"Gathered column {} ({:.2f} bytes/elem.) in {} sec.{}",
name,
static_cast<double>(profile_info.bytes) / profile_info.rows,
seconds,
speed);
}

} // namespace DB
30 changes: 18 additions & 12 deletions dbms/src/DataStreams/ParallelAggregatingBlockInputStream.cpp
@@ -110,10 +110,10 @@ Block ParallelAggregatingBlockInputStream::readImpl()

LOG_FMT_TRACE(
log,
"Will merge {} temporary files of size {} MiB compressed, {} MiB uncompressed.",
"Will merge {} temporary files of size {:.2f} MiB compressed, {:.2f} MiB uncompressed.",
files.files.size(),
-files.sum_size_compressed / 1048576.0,
-files.sum_size_uncompressed / 1048576.0);
+(files.sum_size_compressed / 1048576.0),
+(files.sum_size_uncompressed / 1048576.0);

impl = std::make_unique<MergingAggregatedMemoryEfficientBlockInputStream>(
input_streams,
@@ -239,21 +239,27 @@ void ParallelAggregatingBlockInputStream::execute()
for (size_t i = 0; i < max_threads; ++i)
{
size_t rows = many_data[i]->size();
-LOG_TRACE(
+LOG_FMT_TRACE(
log,
std::fixed << std::setprecision(3) << "Aggregated. " << threads_data[i].src_rows << " to " << rows << " rows"
<< " (from " << threads_data[i].src_bytes / 1048576.0 << " MiB)"
<< " in " << elapsed_seconds << " sec."
<< " (" << threads_data[i].src_rows / elapsed_seconds << " rows/sec., " << threads_data[i].src_bytes / elapsed_seconds / 1048576.0 << " MiB/sec.)");
"Aggregated. {} to {} rows (from {:.3f} MiB) in {:.3f} sec. ({:.3f} rows/sec., {:.3f} MiB/sec.)",
threads_data[i].src_rows,
rows,
(threads_data[i].src_bytes / 1048576.0),
elapsed_seconds,
threads_data[i].src_rows / elapsed_seconds,
threads_data[i].src_bytes / elapsed_seconds / 1048576.0);

total_src_rows += threads_data[i].src_rows;
total_src_bytes += threads_data[i].src_bytes;
}
-LOG_TRACE(
+LOG_FMT_TRACE(
log,
std::fixed << std::setprecision(3) << "Total aggregated. " << total_src_rows << " rows (from " << total_src_bytes / 1048576.0 << " MiB)"
<< " in " << elapsed_seconds << " sec."
<< " (" << total_src_rows / elapsed_seconds << " rows/sec., " << total_src_bytes / elapsed_seconds / 1048576.0 << " MiB/sec.)");
"Total aggregated. {} rows (from {:.3f} MiB) in {:.3f} sec. ({:.3f} rows/sec., {:.3f} MiB/sec.)",
total_src_rows,
(total_src_bytes / 1048576.0),
elapsed_seconds,
total_src_rows / elapsed_seconds,
total_src_bytes / elapsed_seconds / 1048576.0);

/// If there was no data, and we aggregate without keys, we must return single row with the result of empty aggregation.
/// To do this, we pass a block with zero rows to aggregate.
6 changes: 2 additions & 4 deletions dbms/src/Dictionaries/Embedded/RegionsHierarchies.cpp
@@ -13,10 +13,8 @@
// limitations under the License.

#include <Dictionaries/Embedded/RegionsHierarchies.h>

-#include <common/logger_useful.h>
-
#include <Poco/DirectoryIterator.h>
+#include <common/logger_useful.h>


RegionsHierarchies::RegionsHierarchies(IRegionsHierarchiesDataProviderPtr data_provider)
@@ -28,7 +26,7 @@ RegionsHierarchies::RegionsHierarchies(IRegionsHierarchiesDataProviderPtr data_p

for (const auto & name : data_provider->listCustomHierarchies())
{
LOG_DEBUG(log, "Adding regions hierarchy for " << name);
LOG_FMT_DEBUG(log, "Adding regions hierarchy for {}", name);
data.emplace(name, data_provider->getHierarchySource(name));
}

2 changes: 1 addition & 1 deletion dbms/src/Dictionaries/HTTPDictionarySource.cpp
@@ -128,7 +128,7 @@ BlockInputStreamPtr HTTPDictionarySource::loadKeys(
const Columns & key_columns,
const std::vector<size_t> & requested_rows)
{
LOG_FMT_TRACE(log, "loadKeys {} size = {}", toString(), requested_rows.size());
LOG_FMT_TRACE(log, "loadKeys {} size = {}", toString(), requested_rows.size());

ReadWriteBufferFromHTTP::OutStreamCallback out_stream_callback = [&](std::ostream & ostr) {
WriteBufferFromOStream out_buffer(ostr);
2 changes: 1 addition & 1 deletion dbms/src/Interpreters/InterpreterSelectQuery.cpp
@@ -289,7 +289,7 @@ void InterpreterSelectQuery::getAndLockStorageWithSchemaVersion(const String & d
auto start_time = Clock::now();
context.getTMTContext().getSchemaSyncer()->syncSchemas(context);
auto schema_sync_cost = std::chrono::duration_cast<std::chrono::milliseconds>(Clock::now() - start_time).count();
-LOG_DEBUG(log, __PRETTY_FUNCTION__ << " Table " << qualified_name << " schema sync cost " << schema_sync_cost << "ms.");
+LOG_FMT_DEBUG(log, "Table {} schema sync cost {}ms.", qualified_name, schema_sync_cost);

std::tie(storage_tmp, lock, storage_schema_version, ok) = get_and_lock_storage(true);
if (ok)
25 changes: 11 additions & 14 deletions dbms/src/Interpreters/SharedQueries.h
@@ -57,12 +57,11 @@ struct SharedQuery
++finished_clients;
last_finish_time = Poco::Timestamp();

-LOG_FMT_TRACE(
-log,
-"onClientFinish, SharedQuery({}), clients: {}, finished_clients: {}",
-query_id,
-clients,
-finished_clients);
+LOG_FMT_TRACE(log,
+"onClientFinish, SharedQuery({}), clients:{}, finished_clients: {}",
+query_id,
+clients,
+finished_clients);
}

bool isDone() const
@@ -124,13 +123,11 @@ class SharedQueries
}
query.connected_clients++;

-LOG_FMT_TRACE(
-log,
-"getOrCreateBlockIO, query_id: {}, clients: {}, connected_clients: {}",
-query_id,
-clients,
-query.connected_clients);
-
+LOG_FMT_TRACE(log,
+"getOrCreateBlockIO, query_id: {}, clients: {}, connected_clients: {}",
+query_id,
+clients,
+query.connected_clients);
return query.io;
}
else
@@ -162,7 +159,7 @@

// if (it->second->isDone())
// {
-// LOG_TRACE(log, "Remove shared query(" << it->second->query_id << ")");
+// LOG_FMT_TRACE(log, "Remove shared query({})", it->second->query_id);
// queries.erase(it);
// }
}
22 changes: 10 additions & 12 deletions dbms/src/Interpreters/Users.cpp
@@ -79,9 +79,9 @@ class IPAddressPattern : public IAddressPattern
else
{
String addr(str, 0, pos - str.c_str());
-UInt8 prefix_bits_ = parse<UInt8>(pos + 1);
+UInt8 prefix_bits = parse<UInt8>(pos + 1);

-construct(Poco::Net::IPAddress(addr), prefix_bits_);
+construct(Poco::Net::IPAddress(addr), prefix_bits);
}
}

@@ -210,28 +210,28 @@ class HostRegexpPattern : public IAddressPattern
String domain = cache(addr);
Poco::RegularExpression::Match match;

-if (host_regexp.match(domain, match) && HostExactPattern(domain).contains(addr))
-return true;
-
-return false;
+return host_regexp.match(domain, match) && HostExactPattern(domain).contains(addr);
}
};


bool AddressPatterns::contains(const Poco::Net::IPAddress & addr) const
{
-for (size_t i = 0, size = patterns.size(); i < size; ++i)
+for (const auto & pattern : patterns)
{
/// If host cannot be resolved, skip it and try next.
try
{
-if (patterns[i]->contains(addr))
+if (pattern->contains(addr))
return true;
}
catch (const DB::Exception & e)
{
-LOG_WARNING(&Poco::Logger::get("AddressPatterns"),
-"Failed to check if pattern contains address " << addr.toString() << ". " << e.displayText() << ", code = " << e.code());
+LOG_FMT_WARNING(&Poco::Logger::get("AddressPatterns"),
+"Failed to check if pattern contains address {}. {}, code = {}",
+addr.toString(),
+e.displayText(),
+e.code());

if (e.code() == ErrorCodes::DNS_ERROR)
{
@@ -276,8 +276,6 @@ const User & User::getDefaultUser()

User::User(const String & name_)
: name(name_)
-, password()
-, password_sha256_hex()
, profile(User::DEFAULT_USER_NAME)
, quota(QuotaForInterval::DEFAULT_QUOTA_NAME)
{}
6 changes: 0 additions & 6 deletions dbms/src/Storages/DeltaMerge/DeltaTree.h
@@ -1421,8 +1421,6 @@ typename DT_CLASS::InternPtr DT_CLASS::afterNodeUpdated(T * node)
}

parent_updated = true;
-
-// LOG_TRACE(log, nodeName(node) << " split");
}
else if (T::underflow(node->count) && root != asNode(node)) // adopt or merge
{
@@ -1474,8 +1472,6 @@ typename DT_CLASS::InternPtr DT_CLASS::afterNodeUpdated(T * node)
right_leaf = as(Leaf, node);
}
--(parent->count);
-
-// LOG_TRACE(log, nodeName(node) << " merge");
}
else
{
@@ -1487,8 +1483,6 @@ typename DT_CLASS::InternPtr DT_CLASS::afterNodeUpdated(T * node)
parent->sids[std::min(pos, sibling_pos)] = new_sep_sid;
parent->deltas[pos] = checkDelta(node->getDelta());
parent->deltas[sibling_pos] = checkDelta(sibling->getDelta());
-
-// LOG_TRACE(log, nodeName(node) << " adoption");
}

parent_updated = true;
