Apply formatter to recent 200+ commits. (facebook#5830)
Summary:
Further apply the formatter to more recent commits.
Pull Request resolved: facebook#5830

Test Plan: Run all existing tests.

Differential Revision: D17488031

fbshipit-source-id: 137458fd94d56dd271b8b40c522b03036943a2ab
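
The formatting pass itself is not shown on this page. The sketch below is only a rough illustration: the helper names, the HEAD~200..HEAD revision range, and the extension filter are assumptions, not the tooling actually used for this PR. Under those assumptions, applying clang-format to the C/C++ files touched by recent commits could look like this:

#!/usr/bin/env python3
# Illustrative sketch only: reformat C/C++ files touched by recent commits.
# Assumes `git` and `clang-format` are on PATH; the revision range and the
# extension filter are examples, not what this PR actually used.
import subprocess


def changed_cpp_files(rev_range="HEAD~200..HEAD"):
    # List the paths modified anywhere in the given revision range.
    out = subprocess.run(
        ["git", "diff", "--name-only", rev_range],
        check=True, capture_output=True, text=True,
    ).stdout
    # Keep only C/C++ sources and headers.
    return [p for p in out.splitlines() if p.endswith((".cc", ".c", ".h"))]


def format_in_place(paths):
    for path in paths:
        # clang-format -i rewrites each file according to the repository's
        # formatter configuration.
        subprocess.run(["clang-format", "-i", path], check=True)


if __name__ == "__main__":
    format_in_place(changed_cpp_files())

The diff below is consistent with such a mechanical pass: include blocks are re-sorted, wrapped arguments and constructor initializer lists are re-indented, and short accessors are collapsed onto one line, with no behavioral change, which is why the test plan is simply to run the existing tests.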
siying authored and facebook-github-bot committed Sep 20, 2019
1 parent a5fa873 commit e8263db
Showing 105 changed files with 515 additions and 544 deletions.
4 changes: 2 additions & 2 deletions cache/cache_bench.cc
@@ -11,9 +11,9 @@ int main() {
}
#else

#include <cinttypes>
#include <sys/types.h>
#include <stdio.h>
#include <sys/types.h>
#include <cinttypes>

#include "port/port.h"
#include "rocksdb/cache.h"
6 changes: 3 additions & 3 deletions db/column_family.cc
@@ -9,11 +9,11 @@

#include "db/column_family.h"

#include <cinttypes>
#include <vector>
#include <string>
#include <algorithm>
#include <cinttypes>
#include <limits>
#include <string>
#include <vector>

#include "db/compaction/compaction_picker.h"
#include "db/compaction/compaction_picker_fifo.h"
2 changes: 1 addition & 1 deletion db/compaction/compaction_job.cc
@@ -7,8 +7,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <cinttypes>
#include <algorithm>
#include <cinttypes>
#include <functional>
#include <list>
#include <memory>
2 changes: 1 addition & 1 deletion db/compaction/compaction_job_stats_test.cc
@@ -7,8 +7,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include <cinttypes>
#include <algorithm>
#include <cinttypes>
#include <iostream>
#include <mutex>
#include <queue>
2 changes: 1 addition & 1 deletion db/compaction/compaction_job_test.cc
@@ -5,9 +5,9 @@

#ifndef ROCKSDB_LITE

#include <cinttypes>
#include <algorithm>
#include <array>
#include <cinttypes>
#include <map>
#include <string>
#include <tuple>
73 changes: 27 additions & 46 deletions db/db_basic_test.cc
@@ -1356,9 +1356,9 @@ TEST_F(DBBasicTest, MultiGetBatchedMultiLevel) {
// Test class for batched MultiGet with prefix extractor
// Param bool - If true, use partitioned filters
// If false, use full filter block
class MultiGetPrefixExtractorTest
: public DBBasicTest,
public ::testing::WithParamInterface<bool> {};
class MultiGetPrefixExtractorTest : public DBBasicTest,
public ::testing::WithParamInterface<bool> {
};

TEST_P(MultiGetPrefixExtractorTest, Batched) {
Options options = CurrentOptions();
@@ -1396,14 +1396,12 @@ TEST_P(MultiGetPrefixExtractorTest, Batched) {
ASSERT_EQ(get_perf_context()->bloom_sst_hit_count, 4);
}

INSTANTIATE_TEST_CASE_P(
MultiGetPrefix, MultiGetPrefixExtractorTest,
::testing::Bool());
INSTANTIATE_TEST_CASE_P(MultiGetPrefix, MultiGetPrefixExtractorTest,
::testing::Bool());

#ifndef ROCKSDB_LITE
class DBMultiGetRowCacheTest
: public DBBasicTest,
public ::testing::WithParamInterface<bool> {};
class DBMultiGetRowCacheTest : public DBBasicTest,
public ::testing::WithParamInterface<bool> {};

TEST_P(DBMultiGetRowCacheTest, MultiGetBatched) {
do {
@@ -1543,10 +1541,9 @@ TEST_F(DBBasicTest, GetAllKeyVersions) {

class DBBasicTestWithParallelIO
: public DBTestBase,
public testing::WithParamInterface<std::tuple<bool,bool,bool,bool>> {
public testing::WithParamInterface<std::tuple<bool, bool, bool, bool>> {
public:
DBBasicTestWithParallelIO()
: DBTestBase("/db_basic_test_with_parallel_io") {
DBBasicTestWithParallelIO() : DBTestBase("/db_basic_test_with_parallel_io") {
bool compressed_cache = std::get<0>(GetParam());
bool uncompressed_cache = std::get<1>(GetParam());
compression_enabled_ = std::get<2>(GetParam());
@@ -1570,7 +1567,7 @@ class DBBasicTestWithParallelIO
table_options.block_cache = uncompressed_cache_;
table_options.block_cache_compressed = compressed_cache_;
table_options.flush_block_policy_factory.reset(
new MyFlushBlockPolicyFactory());
new MyFlushBlockPolicyFactory());
options.table_factory.reset(new BlockBasedTableFactory(table_options));
if (!compression_enabled_) {
options.compression = kNoCompression;
@@ -1598,24 +1595,17 @@ class DBBasicTestWithParallelIO
int num_found() { return uncompressed_cache_->num_found(); }
int num_inserts() { return uncompressed_cache_->num_inserts(); }

int num_lookups_compressed() {
return compressed_cache_->num_lookups();
}
int num_found_compressed() {
return compressed_cache_->num_found();
}
int num_inserts_compressed() {
return compressed_cache_->num_inserts();
}
int num_lookups_compressed() { return compressed_cache_->num_lookups(); }
int num_found_compressed() { return compressed_cache_->num_found(); }
int num_inserts_compressed() { return compressed_cache_->num_inserts(); }

bool fill_cache() { return fill_cache_; }

static void SetUpTestCase() {}
static void TearDownTestCase() {}

private:
class MyFlushBlockPolicyFactory
: public FlushBlockPolicyFactory {
class MyFlushBlockPolicyFactory : public FlushBlockPolicyFactory {
public:
MyFlushBlockPolicyFactory() {}

@@ -1630,11 +1620,10 @@ class DBBasicTestWithParallelIO
}
};

class MyFlushBlockPolicy
: public FlushBlockPolicy {
class MyFlushBlockPolicy : public FlushBlockPolicy {
public:
explicit MyFlushBlockPolicy(const BlockBuilder& data_block_builder)
: num_keys_(0), data_block_builder_(data_block_builder) {}
: num_keys_(0), data_block_builder_(data_block_builder) {}

bool Update(const Slice& /*key*/, const Slice& /*value*/) override {
if (data_block_builder_.empty()) {
@@ -1656,11 +1645,10 @@ class DBBasicTestWithParallelIO
const BlockBuilder& data_block_builder_;
};

class MyBlockCache
: public Cache {
class MyBlockCache : public Cache {
public:
explicit MyBlockCache(std::shared_ptr<Cache>& target)
: target_(target), num_lookups_(0), num_found_(0), num_inserts_(0) {}
: target_(target), num_lookups_(0), num_found_(0), num_inserts_(0) {}

virtual const char* Name() const override { return "MyBlockCache"; }

@@ -1682,9 +1670,7 @@ class DBBasicTestWithParallelIO
return handle;
}

virtual bool Ref(Handle* handle) override {
return target_->Ref(handle);
}
virtual bool Ref(Handle* handle) override { return target_->Ref(handle); }

virtual bool Release(Handle* handle, bool force_erase = false) override {
return target_->Release(handle, force_erase);
@@ -1694,12 +1680,8 @@ class DBBasicTestWithParallelIO
return target_->Value(handle);
}

virtual void Erase(const Slice& key) override {
target_->Erase(key);
}
virtual uint64_t NewId() override {
return target_->NewId();
}
virtual void Erase(const Slice& key) override { target_->Erase(key); }
virtual uint64_t NewId() override { return target_->NewId(); }

virtual void SetCapacity(size_t capacity) override {
target_->SetCapacity(capacity);
@@ -1717,9 +1699,7 @@ class DBBasicTestWithParallelIO
return target_->GetCapacity();
}

virtual size_t GetUsage() const override {
return target_->GetUsage();
}
virtual size_t GetUsage() const override { return target_->GetUsage(); }

virtual size_t GetUsage(Handle* handle) const override {
return target_->GetUsage(handle);
@@ -1745,6 +1725,7 @@ class DBBasicTestWithParallelIO
int num_found() { return num_found_; }

int num_inserts() { return num_inserts_; }

private:
std::shared_ptr<Cache> target_;
int num_lookups_;
@@ -1777,7 +1758,7 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
statuses.resize(keys.size());

dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
keys.data(), values.data(), statuses.data(), true);
keys.data(), values.data(), statuses.data(), true);
ASSERT_TRUE(CheckValue(0, values[0].ToString()));
ASSERT_TRUE(CheckValue(50, values[1].ToString()));

@@ -1789,7 +1770,7 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
values[0].Reset();
values[1].Reset();
dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
keys.data(), values.data(), statuses.data(), true);
keys.data(), values.data(), statuses.data(), true);
ASSERT_TRUE(CheckValue(1, values[0].ToString()));
ASSERT_TRUE(CheckValue(51, values[1].ToString()));

@@ -1798,15 +1779,15 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {

keys.resize(10);
statuses.resize(10);
std::vector<int> key_ints{1,2,15,16,55,81,82,83,84,85};
std::vector<int> key_ints{1, 2, 15, 16, 55, 81, 82, 83, 84, 85};
for (size_t i = 0; i < key_ints.size(); ++i) {
key_data[i] = Key(key_ints[i]);
keys[i] = Slice(key_data[i]);
statuses[i] = Status::OK();
values[i].Reset();
}
dbfull()->MultiGet(ro, dbfull()->DefaultColumnFamily(), keys.size(),
keys.data(), values.data(), statuses.data(), true);
keys.data(), values.data(), statuses.data(), true);
for (size_t i = 0; i < key_ints.size(); ++i) {
ASSERT_OK(statuses[i]);
ASSERT_TRUE(CheckValue(key_ints[i], values[i].ToString()));
2 changes: 1 addition & 1 deletion db/db_compaction_test.cc
@@ -4664,7 +4664,7 @@ TEST_F(DBCompactionTest, ConsistencyFailTest) {

rocksdb::SyncPoint::GetInstance()->SetCallBack(
"VersionBuilder::CheckConsistency", [&](void* arg) {
auto p =
auto p =
reinterpret_cast<std::pair<FileMetaData**, FileMetaData**>*>(arg);
// just swap the two FileMetaData so that we hit error
// in CheckConsistency funcion
3 changes: 1 addition & 2 deletions db/db_filesnapshot.cc
@@ -6,9 +6,9 @@

#ifndef ROCKSDB_LITE

#include <cinttypes>
#include <stdint.h>
#include <algorithm>
#include <cinttypes>
#include <string>
#include "db/db_impl/db_impl.h"
#include "db/job_context.h"
@@ -172,7 +172,6 @@ Status DBImpl::GetCurrentWalFile(std::unique_ptr<LogFile>* current_log_file) {

return wal_manager_.GetLiveWalFile(current_logfile_number, current_log_file);
}

}

#endif // ROCKSDB_LITE
11 changes: 4 additions & 7 deletions db/db_impl/db_impl.cc
@@ -3286,9 +3286,8 @@ Status DestroyDB(const std::string& dbname, const Options& options,
if (type == kMetaDatabase) {
del = DestroyDB(path_to_delete, options);
} else if (type == kTableFile || type == kLogFile) {
del =
DeleteDBFile(&soptions, path_to_delete, dbname,
/*force_bg=*/false, /*force_fg=*/!wal_in_db_path);
del = DeleteDBFile(&soptions, path_to_delete, dbname,
/*force_bg=*/false, /*force_fg=*/!wal_in_db_path);
} else {
del = env->DeleteFile(path_to_delete);
}
@@ -4003,8 +4002,7 @@ Status DBImpl::IngestExternalFiles(
Status DBImpl::CreateColumnFamilyWithImport(
const ColumnFamilyOptions& options, const std::string& column_family_name,
const ImportColumnFamilyOptions& import_options,
const ExportImportFilesMetaData& metadata,
ColumnFamilyHandle** handle) {
const ExportImportFilesMetaData& metadata, ColumnFamilyHandle** handle) {
assert(handle != nullptr);
assert(*handle == nullptr);
std::string cf_comparator_name = options.comparator->Name();
@@ -4045,8 +4043,7 @@ Status DBImpl::CreateColumnFamilyWithImport(
// reuse the file number that has already assigned to the internal file,
// and this will overwrite the external file. To protect the external
// file, we have to make sure the file number will never being reused.
next_file_number =
versions_->FetchAddFileNumber(metadata.files.size());
next_file_number = versions_->FetchAddFileNumber(metadata.files.size());
auto cf_options = cfd->GetLatestMutableCFOptions();
status = versions_->LogAndApply(cfd, *cf_options, &dummy_edit, &mutex_,
directories_.GetDbDir());
3 changes: 2 additions & 1 deletion db/db_impl/db_impl_compaction_flush.cc
@@ -1565,7 +1565,8 @@ Status DBImpl::FlushMemTable(ColumnFamilyData* cfd,
if (stats_cf_flush_needed) {
ROCKS_LOG_INFO(immutable_db_options_.info_log,
"Force flushing stats CF with manual flush of %s "
"to avoid holding old logs", cfd->GetName().c_str());
"to avoid holding old logs",
cfd->GetName().c_str());
s = SwitchMemtable(cfd_stats, &context);
flush_memtable_id = cfd_stats->imm()->GetLatestMemTableID();
flush_req.emplace_back(cfd_stats, flush_memtable_id);
9 changes: 4 additions & 5 deletions db/db_impl/db_impl_open.cc
@@ -135,9 +135,9 @@ DBOptions SanitizeOptions(const std::string& dbname, const DBOptions& src) {
std::vector<std::string> filenames;
result.env->GetChildren(result.wal_dir, &filenames);
for (std::string& filename : filenames) {
if (filename.find(".log.trash",
filename.length() - std::string(".log.trash").length()) !=
std::string::npos) {
if (filename.find(".log.trash", filename.length() -
std::string(".log.trash").length()) !=
std::string::npos) {
std::string trash_file = result.wal_dir + "/" + filename;
result.env->DeleteFile(trash_file);
}
@@ -1352,8 +1352,7 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname,
return s;
}

impl->wal_in_db_path_ =
IsWalDirSameAsDBPath(&impl->immutable_db_options_);
impl->wal_in_db_path_ = IsWalDirSameAsDBPath(&impl->immutable_db_options_);

impl->mutex_.Lock();
// Handles create_if_missing, error_if_exists
3 changes: 1 addition & 2 deletions db/db_impl/db_impl_secondary.cc
@@ -588,8 +588,7 @@ Status DB::OpenAsSecondary(
&impl->write_controller_));
impl->column_family_memtables_.reset(
new ColumnFamilyMemTablesImpl(impl->versions_->GetColumnFamilySet()));
impl->wal_in_db_path_ =
IsWalDirSameAsDBPath(&impl->immutable_db_options_);
impl->wal_in_db_path_ = IsWalDirSameAsDBPath(&impl->immutable_db_options_);

impl->mutex_.Lock();
s = impl->Recover(column_families, true, false, false);
9 changes: 5 additions & 4 deletions db/db_impl/db_secondary_test.cc
@@ -605,9 +605,9 @@ TEST_F(DBSecondaryTest, SwitchWAL) {
TEST_F(DBSecondaryTest, SwitchWALMultiColumnFamilies) {
const int kNumKeysPerMemtable = 1;
SyncPoint::GetInstance()->DisableProcessing();
SyncPoint::GetInstance()->LoadDependency({
{"DBImpl::BackgroundCallFlush:ContextCleanedUp",
"DBSecondaryTest::SwitchWALMultipleColumnFamilies:BeforeCatchUp"}});
SyncPoint::GetInstance()->LoadDependency(
{{"DBImpl::BackgroundCallFlush:ContextCleanedUp",
"DBSecondaryTest::SwitchWALMultipleColumnFamilies:BeforeCatchUp"}});
SyncPoint::GetInstance()->EnableProcessing();
const std::string kCFName1 = "pikachu";
Options options;
@@ -662,7 +662,8 @@ TEST_F(DBSecondaryTest, SwitchWALMultiColumnFamilies) {
Put(0 /*cf*/, "key" + std::to_string(k), "value" + std::to_string(k)));
ASSERT_OK(
Put(1 /*cf*/, "key" + std::to_string(k), "value" + std::to_string(k)));
TEST_SYNC_POINT("DBSecondaryTest::SwitchWALMultipleColumnFamilies:BeforeCatchUp");
TEST_SYNC_POINT(
"DBSecondaryTest::SwitchWALMultipleColumnFamilies:BeforeCatchUp");
ASSERT_OK(db_secondary_->TryCatchUpWithPrimary());
verify_db(dbfull(), handles_, db_secondary_, handles_secondary_);
SyncPoint::GetInstance()->ClearTrace();
4 changes: 2 additions & 2 deletions db/db_info_dumper.cc
@@ -5,10 +5,10 @@

#include "db/db_info_dumper.h"

#include <cinttypes>
#include <stdio.h>
#include <string>
#include <algorithm>
#include <cinttypes>
#include <string>
#include <vector>

#include "file/filename.h"