Add more tests for assert status checked (facebook#7524)
Summary:
Added 10 more tests that pass with ASSERT_STATUS_CHECKED enabled.
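Background on the pattern: in an ASSERT_STATUS_CHECKED build, a rocksdb::Status must be inspected (for example via ok()) before it is destroyed, so a test that drops a status on the floor aborts instead of silently ignoring a failure. The sketch below models that mechanism so the pattern in the diffs is easier to follow; it is a simplified illustration, not RocksDB's actual Status implementation, and MustCheckStatus and DoWrite are hypothetical names.

    #include <cassert>
    #include <cstdio>

    // Simplified model of a "must check" status, assuming the usual
    // check-on-destruction technique; RocksDB's real Status is more elaborate.
    class MustCheckStatus {
     public:
      explicit MustCheckStatus(bool ok) : ok_(ok) {}
      ~MustCheckStatus() { assert(checked_ && "status destroyed unchecked"); }
      bool ok() const {
        checked_ = true;  // inspecting the status marks it checked
        return ok_;
      }

     private:
      bool ok_;
      mutable bool checked_ = false;
    };

    MustCheckStatus DoWrite() { return MustCheckStatus(true); }  // hypothetical op

    int main() {
      // DoWrite();  // would abort: the temporary is destroyed unchecked

      MustCheckStatus s = DoWrite();
      if (!s.ok()) {  // checking it, as ASSERT_OK(db->Put(...)) does below
        std::fprintf(stderr, "write failed\n");
      }
      return 0;
    }

Each diff in this commit applies exactly that discipline: calls whose returned Status was previously discarded are wrapped in ASSERT_OK(...), so the status is both verified and marked checked.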

Pull Request resolved: facebook#7524

Reviewed By: akankshamahajan15

Differential Revision: D24323093

Pulled By: ajkr

fbshipit-source-id: 28d4106d0ca1740c3b896c755edf82d504b74801
mrambacher authored and facebook-github-bot committed Dec 23, 2020
1 parent daab760 commit 0241819
Showing 42 changed files with 487 additions and 384 deletions.
33 changes: 33 additions & 0 deletions Makefile
@@ -587,6 +587,7 @@ ifdef ASSERT_STATUS_CHECKED
blob_file_reader_test \
bloom_test \
cassandra_format_test \
cassandra_functional_test \
cassandra_row_merge_test \
cassandra_serialize_test \
cleanable_test \
@@ -595,6 +596,14 @@ ifdef ASSERT_STATUS_CHECKED
crc32c_test \
dbformat_test \
db_basic_test \
compact_files_test \
compaction_picker_test \
comparator_db_test \
db_encryption_test \
db_iter_test \
db_iter_stress_test \
db_log_iter_test \
db_bloom_filter_test \
db_blob_basic_test \
db_blob_index_test \
db_block_cache_test \
@@ -615,6 +624,19 @@ ifdef ASSERT_STATUS_CHECKED
deletefile_test \
external_sst_file_test \
options_file_test \
db_statistics_test \
db_table_properties_test \
db_tailing_iter_test \
fault_injection_test \
listener_test \
log_test \
manual_compaction_test \
obsolete_files_test \
perf_context_test \
periodic_work_scheduler_test \
version_set_test \
wal_manager_test \
defer_test \
filename_test \
dynamic_bloom_test \
@@ -658,6 +680,7 @@ ifdef ASSERT_STATUS_CHECKED
ribbon_test \
skiplist_test \
slice_test \
slice_transform_test \
sst_dump_test \
statistics_test \
stats_history_test \
@@ -694,13 +717,23 @@ ifdef ASSERT_STATUS_CHECKED
flush_job_test \
block_based_filter_block_test \
block_fetcher_test \
block_test \
data_block_hash_index_test \
full_filter_block_test \
partitioned_filter_block_test \
column_family_test \
file_reader_writer_test \
rate_limiter_test \
corruption_test \
reduce_levels_test \
thread_list_test \
compact_on_deletion_collector_test \
db_universal_compaction_test \
import_column_family_test \
option_change_migration_test \
cuckoo_table_builder_test \
cuckoo_table_db_test \
cuckoo_table_reader_test \
memory_test \
table_test \
write_batch_test \
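Each name added above registers that test binary with the status-checked build: when the Makefile is invoked with ASSERT_STATUS_CHECKED=1 (the variable the ifdef above tests), these targets compile and run in that mode. A minimal test in the style of the listed suites might look like the sketch below; it assumes RocksDB's test harness header test_util/testharness.h (which supplies ASSERT_OK), and the test name and database path are illustrative, not from this commit.

    #include <string>

    #include "rocksdb/db.h"
    #include "rocksdb/options.h"
    #include "test_util/testharness.h"

    // Illustrative only: shows the wrap-every-status pattern the listed
    // suites follow under ASSERT_STATUS_CHECKED.
    TEST(StatusCheckedExampleTest, PutFlushGet) {
      rocksdb::Options options;
      options.create_if_missing = true;
      rocksdb::DB* db = nullptr;
      // Every Status-returning call is wrapped, so no status goes unchecked.
      ASSERT_OK(rocksdb::DB::Open(options, "/tmp/status_checked_example", &db));
      ASSERT_OK(db->Put(rocksdb::WriteOptions(), "key", "value"));
      ASSERT_OK(db->Flush(rocksdb::FlushOptions()));
      std::string value;
      ASSERT_OK(db->Get(rocksdb::ReadOptions(), "key", &value));
      ASSERT_EQ("value", value);
      ASSERT_OK(db->Close());
      delete db;
    }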
53 changes: 28 additions & 25 deletions db/compact_files_test.cc
@@ -91,9 +91,9 @@ TEST_F(CompactFilesTest, L0ConflictsFiles) {
// create couple files
// Background compaction starts and waits in BackgroundCallCompaction:0
for (int i = 0; i < kLevel0Trigger * 4; ++i) {
db->Put(WriteOptions(), ToString(i), "");
db->Put(WriteOptions(), ToString(100 - i), "");
db->Flush(FlushOptions());
ASSERT_OK(db->Put(WriteOptions(), ToString(i), ""));
ASSERT_OK(db->Put(WriteOptions(), ToString(100 - i), ""));
ASSERT_OK(db->Flush(FlushOptions()));
}

ROCKSDB_NAMESPACE::ColumnFamilyMetaData meta;
@@ -138,18 +138,18 @@ TEST_F(CompactFilesTest, ObsoleteFiles) {
DB* db = nullptr;
DestroyDB(db_name_, options);
Status s = DB::Open(options, db_name_, &db);
assert(s.ok());
assert(db);
ASSERT_OK(s);
ASSERT_NE(db, nullptr);

// create couple files
for (int i = 1000; i < 2000; ++i) {
db->Put(WriteOptions(), ToString(i),
std::string(kWriteBufferSize / 10, 'a' + (i % 26)));
ASSERT_OK(db->Put(WriteOptions(), ToString(i),
std::string(kWriteBufferSize / 10, 'a' + (i % 26))));
}

auto l0_files = collector->GetFlushedFiles();
ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files, 1));
static_cast_with_check<DBImpl>(db)->TEST_WaitForCompact();
ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForCompact());

// verify all compaction input files are deleted
for (auto fname : l0_files) {
@@ -182,15 +182,17 @@ TEST_F(CompactFilesTest, NotCutOutputOnLevel0) {

// create couple files
for (int i = 0; i < 500; ++i) {
db->Put(WriteOptions(), ToString(i), std::string(1000, 'a' + (i % 26)));
ASSERT_OK(db->Put(WriteOptions(), ToString(i),
std::string(1000, 'a' + (i % 26))));
}
static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable();
ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable());
auto l0_files_1 = collector->GetFlushedFiles();
collector->ClearFlushedFiles();
for (int i = 0; i < 500; ++i) {
db->Put(WriteOptions(), ToString(i), std::string(1000, 'a' + (i % 26)));
ASSERT_OK(db->Put(WriteOptions(), ToString(i),
std::string(1000, 'a' + (i % 26))));
}
static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable();
ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable());
auto l0_files_2 = collector->GetFlushedFiles();
ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files_1, 0));
ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files_2, 0));
@@ -213,13 +215,13 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) {
DB* db = nullptr;
DestroyDB(db_name_, options);
Status s = DB::Open(options, db_name_, &db);
assert(s.ok());
ASSERT_OK(s);
assert(db);

// Create 5 files.
for (int i = 0; i < 5; ++i) {
db->Put(WriteOptions(), "key" + ToString(i), "value");
db->Flush(FlushOptions());
ASSERT_OK(db->Put(WriteOptions(), "key" + ToString(i), "value"));
ASSERT_OK(db->Flush(FlushOptions()));
}

auto l0_files = collector->GetFlushedFiles();
@@ -237,8 +239,8 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) {

// In the meantime flush another file.
TEST_SYNC_POINT("CompactFilesTest.CapturingPendingFiles:0");
db->Put(WriteOptions(), "key5", "value");
db->Flush(FlushOptions());
ASSERT_OK(db->Put(WriteOptions(), "key5", "value"));
ASSERT_OK(db->Flush(FlushOptions()));
TEST_SYNC_POINT("CompactFilesTest.CapturingPendingFiles:1");

compaction_thread.join();
@@ -249,7 +251,7 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) {

// Make sure we can reopen the DB.
s = DB::Open(options, db_name_, &db);
ASSERT_TRUE(s.ok());
ASSERT_OK(s);
assert(db);
delete db;
}
@@ -293,8 +295,8 @@ TEST_F(CompactFilesTest, CompactionFilterWithGetSv) {
cf->SetDB(db);

// Write one L0 file
db->Put(WriteOptions(), "K1", "V1");
db->Flush(FlushOptions());
ASSERT_OK(db->Put(WriteOptions(), "K1", "V1"));
ASSERT_OK(db->Flush(FlushOptions()));

// Compact all L0 files using CompactFiles
ROCKSDB_NAMESPACE::ColumnFamilyMetaData meta;
@@ -337,8 +339,8 @@ TEST_F(CompactFilesTest, SentinelCompressionType) {
DB* db = nullptr;
ASSERT_OK(DB::Open(options, db_name_, &db));

db->Put(WriteOptions(), "key", "val");
db->Flush(FlushOptions());
ASSERT_OK(db->Put(WriteOptions(), "key", "val"));
ASSERT_OK(db->Flush(FlushOptions()));

auto l0_files = collector->GetFlushedFiles();
ASSERT_EQ(1, l0_files.size());
@@ -377,14 +379,15 @@ TEST_F(CompactFilesTest, GetCompactionJobInfo) {
DB* db = nullptr;
DestroyDB(db_name_, options);
Status s = DB::Open(options, db_name_, &db);
assert(s.ok());
ASSERT_OK(s);
assert(db);

// create couple files
for (int i = 0; i < 500; ++i) {
db->Put(WriteOptions(), ToString(i), std::string(1000, 'a' + (i % 26)));
ASSERT_OK(db->Put(WriteOptions(), ToString(i),
std::string(1000, 'a' + (i % 26))));
}
static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable();
ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable());
auto l0_files_1 = collector->GetFlushedFiles();
CompactionOptions co;
co.compression = CompressionType::kLZ4Compression;
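Note that the compact_files_test changes also replace assert(s.ok()) with ASSERT_OK(s). The difference matters: assert compiles away in NDEBUG builds and reports nothing about the error, while the harness macro fails the test, prints the status message, and (by calling ok()) marks the status checked. A simplified stand-in for such a macro could look like the sketch below; this is an assumption-laden illustration, not the actual definition from test_util/testharness.h.

    #include <gtest/gtest.h>

    #include "rocksdb/status.h"

    // Simplified stand-in for RocksDB's ASSERT_OK, which behaves similarly:
    // fail the test and report the status text if the operation did not succeed.
    #define EXAMPLE_ASSERT_OK(expr)                              \
      do {                                                       \
        const rocksdb::Status _s = (expr);                       \
        ASSERT_TRUE(_s.ok()) << "status: " << _s.ToString();     \
      } while (0)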
2 changes: 1 addition & 1 deletion db/compaction/compaction_picker_test.cc
@@ -141,7 +141,7 @@ class CompactionPickerTest : public testing::Test {
if (temp_vstorage_) {
VersionBuilder builder(FileOptions(), &ioptions_, nullptr,
vstorage_.get(), nullptr);
builder.SaveTo(temp_vstorage_.get());
ASSERT_OK(builder.SaveTo(temp_vstorage_.get()));
vstorage_ = std::move(temp_vstorage_);
}
vstorage_->CalculateBaseBytes(ioptions_, mutable_cf_options_);
40 changes: 20 additions & 20 deletions db/cuckoo_table_db_test.cc
@@ -129,10 +129,10 @@ TEST_F(CuckooTableDBTest, Flush) {
ASSERT_OK(Put("key1", "v1"));
ASSERT_OK(Put("key2", "v2"));
ASSERT_OK(Put("key3", "v3"));
dbfull()->TEST_FlushMemTable();
ASSERT_OK(dbfull()->TEST_FlushMemTable());

TablePropertiesCollection ptc;
reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc);
ASSERT_OK(reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc));
ASSERT_EQ(1U, ptc.size());
ASSERT_EQ(3U, ptc.begin()->second->num_entries);
ASSERT_EQ("1", FilesPerLevel());
@@ -146,9 +146,9 @@ TEST_F(CuckooTableDBTest, Flush) {
ASSERT_OK(Put("key4", "v4"));
ASSERT_OK(Put("key5", "v5"));
ASSERT_OK(Put("key6", "v6"));
dbfull()->TEST_FlushMemTable();
ASSERT_OK(dbfull()->TEST_FlushMemTable());

reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc);
ASSERT_OK(reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc));
ASSERT_EQ(2U, ptc.size());
auto row = ptc.begin();
ASSERT_EQ(3U, row->second->num_entries);
@@ -164,8 +164,8 @@ TEST_F(CuckooTableDBTest, Flush) {
ASSERT_OK(Delete("key6"));
ASSERT_OK(Delete("key5"));
ASSERT_OK(Delete("key4"));
dbfull()->TEST_FlushMemTable();
reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc);
ASSERT_OK(dbfull()->TEST_FlushMemTable());
ASSERT_OK(reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc));
ASSERT_EQ(3U, ptc.size());
row = ptc.begin();
ASSERT_EQ(3U, row->second->num_entries);
@@ -186,10 +186,10 @@ TEST_F(CuckooTableDBTest, FlushWithDuplicateKeys) {
ASSERT_OK(Put("key1", "v1"));
ASSERT_OK(Put("key2", "v2"));
ASSERT_OK(Put("key1", "v3")); // Duplicate
dbfull()->TEST_FlushMemTable();
ASSERT_OK(dbfull()->TEST_FlushMemTable());

TablePropertiesCollection ptc;
reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc);
ASSERT_OK(reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc));
ASSERT_EQ(1U, ptc.size());
ASSERT_EQ(2U, ptc.begin()->second->num_entries);
ASSERT_EQ("1", FilesPerLevel());
@@ -219,7 +219,7 @@ TEST_F(CuckooTableDBTest, Uint64Comparator) {
ASSERT_OK(Put(Uint64Key(1), "v1"));
ASSERT_OK(Put(Uint64Key(2), "v2"));
ASSERT_OK(Put(Uint64Key(3), "v3"));
dbfull()->TEST_FlushMemTable();
ASSERT_OK(dbfull()->TEST_FlushMemTable());

ASSERT_EQ("v1", Get(Uint64Key(1)));
ASSERT_EQ("v2", Get(Uint64Key(2)));
@@ -228,10 +228,10 @@ TEST_F(CuckooTableDBTest, Uint64Comparator) {

// Add more keys.
ASSERT_OK(Delete(Uint64Key(2))); // Delete.
dbfull()->TEST_FlushMemTable();
ASSERT_OK(dbfull()->TEST_FlushMemTable());
ASSERT_OK(Put(Uint64Key(3), "v0")); // Update.
ASSERT_OK(Put(Uint64Key(4), "v4"));
dbfull()->TEST_FlushMemTable();
ASSERT_OK(dbfull()->TEST_FlushMemTable());
ASSERT_EQ("v1", Get(Uint64Key(1)));
ASSERT_EQ("NOT_FOUND", Get(Uint64Key(2)));
ASSERT_EQ("v0", Get(Uint64Key(3)));
@@ -251,11 +251,11 @@ TEST_F(CuckooTableDBTest, CompactionIntoMultipleFiles) {
for (int idx = 0; idx < 28; ++idx) {
ASSERT_OK(Put(Key(idx), std::string(10000, 'a' + char(idx))));
}
dbfull()->TEST_WaitForFlushMemTable();
ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
ASSERT_EQ("1", FilesPerLevel());

dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
true /* disallow trivial move */);
ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
true /* disallow trivial move */));
ASSERT_EQ("0,2", FilesPerLevel());
for (int idx = 0; idx < 28; ++idx) {
ASSERT_EQ(std::string(10000, 'a' + char(idx)), Get(Key(idx)));
@@ -274,15 +274,15 @@ TEST_F(CuckooTableDBTest, SameKeyInsertedInTwoDifferentFilesAndCompacted) {
for (int idx = 0; idx < 11; ++idx) {
ASSERT_OK(Put(Key(idx), std::string(10000, 'a')));
}
dbfull()->TEST_WaitForFlushMemTable();
ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
ASSERT_EQ("1", FilesPerLevel());

// Generate one more file in level-0, and should trigger level-0 compaction
for (int idx = 0; idx < 11; ++idx) {
ASSERT_OK(Put(Key(idx), std::string(10000, 'a' + char(idx))));
}
dbfull()->TEST_WaitForFlushMemTable();
dbfull()->TEST_CompactRange(0, nullptr, nullptr);
ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));

ASSERT_EQ("0,1", FilesPerLevel());
for (int idx = 0; idx < 11; ++idx) {
@@ -303,7 +303,7 @@ TEST_F(CuckooTableDBTest, AdaptiveTable) {
ASSERT_OK(Put("key1", "v1"));
ASSERT_OK(Put("key2", "v2"));
ASSERT_OK(Put("key3", "v3"));
dbfull()->TEST_FlushMemTable();
ASSERT_OK(dbfull()->TEST_FlushMemTable());

// Write some keys using plain table.
std::shared_ptr<TableFactory> block_based_factory(
@@ -319,7 +319,7 @@ TEST_F(CuckooTableDBTest, AdaptiveTable) {
Reopen(&options);
ASSERT_OK(Put("key4", "v4"));
ASSERT_OK(Put("key1", "v5"));
dbfull()->TEST_FlushMemTable();
ASSERT_OK(dbfull()->TEST_FlushMemTable());

// Write some keys using block based table.
options.table_factory.reset(NewAdaptiveTableFactory(
@@ -328,7 +328,7 @@ TEST_F(CuckooTableDBTest, AdaptiveTable) {
Reopen(&options);
ASSERT_OK(Put("key5", "v6"));
ASSERT_OK(Put("key2", "v7"));
dbfull()->TEST_FlushMemTable();
ASSERT_OK(dbfull()->TEST_FlushMemTable());

ASSERT_EQ("v5", Get("key1"));
ASSERT_EQ("v7", Get("key2"));
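Not every status in these suites can be asserted OK; sometimes a test deliberately ignores an outcome. Under ASSERT_STATUS_CHECKED the status still has to be marked checked before it is destroyed, which is what Status::PermitUncheckedError() is for. A short sketch of that escape hatch follows; the helper function is hypothetical.

    #include "rocksdb/db.h"

    // Hypothetical helper: the flush result is intentionally ignored, but
    // the status must still be marked checked under ASSERT_STATUS_CHECKED.
    void BestEffortFlush(rocksdb::DB* db) {
      rocksdb::Status s = db->Flush(rocksdb::FlushOptions());
      s.PermitUncheckedError();  // waive the check explicitly
    }

Most of the changes in this commit prefer ASSERT_OK over this waiver, since the tests should fail loudly when an operation breaks.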
(The remaining 38 changed files are not shown here.)
