Record the persist_user_defined_timestamps flag in manifest (facebook#11515)

Summary:
Start recording the value of the flag `AdvancedColumnFamilyOptions.persist_user_defined_timestamps` in the Manifest and in the table properties of an SST file when the file is created, and use the recorded flag when creating a table reader for that SST file. The flag's default value is true; it is only explicitly recorded when it is false.
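
For context, a minimal sketch (not part of this commit) of the user-facing option whose value the Manifest now records. It assumes the public `BytewiseComparatorWithU64Ts()` comparator and the `persist_user_defined_timestamps` member of `AdvancedColumnFamilyOptions`; support for the non-default `false` value was still being built out around this change, so the snippet is illustrative only.

```cpp
// Illustrative sketch only: configuring the option whose value is now
// recorded in the Manifest and table properties of each new SST file.
#include "rocksdb/comparator.h"
#include "rocksdb/db.h"
#include "rocksdb/options.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // User-defined timestamps require a timestamp-aware comparator.
  options.comparator = rocksdb::BytewiseComparatorWithU64Ts();
  // Default is true; per this commit, only the non-default value (false)
  // is explicitly recorded in the Manifest and table properties.
  options.persist_user_defined_timestamps = false;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/udt_demo", &db);
  // The false setting was still being wired up around this commit, so
  // Open() may not accept it on all code paths yet.
  if (s.ok()) {
    delete db;
  }
  return 0;
}
```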

Pull Request resolved: facebook#11515

Test Plan:
```
make all check
./version_edit_test
```

Reviewed By: ltamasi

Differential Revision: D46920386

Pulled By: jowlyzhang

fbshipit-source-id: 075c20363d3d2cc1368422ecc805617ed135cc26
jowlyzhang authored and facebook-github-bot committed Jun 22, 2023
1 parent 98c6d7f commit 7521478
Showing 25 changed files with 284 additions and 192 deletions.
2 changes: 2 additions & 0 deletions db/builder.cc
@@ -293,6 +293,8 @@ Status BuildTable(
meta->fd.file_size = file_size;
meta->tail_size = builder->GetTailSize();
meta->marked_for_compaction = builder->NeedCompact();
meta->user_defined_timestamps_persisted =
ioptions.persist_user_defined_timestamps;
assert(meta->fd.GetFileSize() > 0);
tp = builder
->GetTableProperties(); // refresh now that builder is finished
3 changes: 2 additions & 1 deletion db/compaction/compaction_job_test.cc
@@ -386,7 +386,8 @@ class CompactionJobTestBase : public testing::Test {
kUnknownFileCreationTime,
versions_->GetColumnFamilySet()->GetDefault()->NewEpochNumber(),
kUnknownFileChecksum, kUnknownFileChecksumFuncName, kNullUniqueId64x2,
0, 0);
/*compensated_range_deletion_size=*/0, /*tail_size=*/0,
/*user_defined_timestamps_persisted=*/true);

mutex_.Lock();
EXPECT_OK(versions_->LogAndApply(
2 changes: 2 additions & 0 deletions db/compaction/compaction_outputs.cc
@@ -45,6 +45,8 @@ Status CompactionOutputs::Finish(const Status& intput_status,
meta->fd.file_size = current_bytes;
meta->tail_size = builder_->GetTailSize();
meta->marked_for_compaction = builder_->NeedCompact();
meta->user_defined_timestamps_persisted = static_cast<bool>(
builder_->GetTableProperties().user_defined_timestamps_persisted);
}
current_output().finished = true;
stats_.bytes_written += current_bytes;
3 changes: 2 additions & 1 deletion db/compaction/compaction_picker_test.cc
@@ -153,7 +153,8 @@ class CompactionPickerTestBase : public testing::Test {
smallest_seq, largest_seq, marked_for_compact, temperature,
kInvalidBlobFileNumber, kUnknownOldestAncesterTime,
kUnknownFileCreationTime, epoch_number, kUnknownFileChecksum,
kUnknownFileChecksumFuncName, kNullUniqueId64x2, 0, 0);
kUnknownFileChecksumFuncName, kNullUniqueId64x2, 0, 0,
true /* user_defined_timestamps_persisted */);
f->compensated_file_size =
(compensated_file_size != 0) ? compensated_file_size : file_size;
f->oldest_ancester_time = oldest_ancestor_time;
6 changes: 4 additions & 2 deletions db/db_impl/db_impl_compaction_flush.cc
@@ -1777,7 +1777,8 @@ Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {
f->marked_for_compaction, f->temperature, f->oldest_blob_file_number,
f->oldest_ancester_time, f->file_creation_time, f->epoch_number,
f->file_checksum, f->file_checksum_func_name, f->unique_id,
f->compensated_range_deletion_size, f->tail_size);
f->compensated_range_deletion_size, f->tail_size,
f->user_defined_timestamps_persisted);
}
ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
"[%s] Apply version edit:\n%s", cfd->GetName().c_str(),
@@ -3510,7 +3511,8 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
f->oldest_blob_file_number, f->oldest_ancester_time,
f->file_creation_time, f->epoch_number, f->file_checksum,
f->file_checksum_func_name, f->unique_id,
f->compensated_range_deletion_size, f->tail_size);
f->compensated_range_deletion_size, f->tail_size,
f->user_defined_timestamps_persisted);

ROCKS_LOG_BUFFER(
log_buffer,
3 changes: 2 additions & 1 deletion db/db_impl/db_impl_experimental.cc
@@ -138,7 +138,8 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
f->oldest_blob_file_number, f->oldest_ancester_time,
f->file_creation_time, f->epoch_number, f->file_checksum,
f->file_checksum_func_name, f->unique_id,
f->compensated_range_deletion_size, f->tail_size);
f->compensated_range_deletion_size, f->tail_size,
f->user_defined_timestamps_persisted);
}

status = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(),
19 changes: 10 additions & 9 deletions db/db_impl/db_impl_open.cc
@@ -618,7 +618,7 @@ Status DBImpl::Recover(
f->file_creation_time, f->epoch_number,
f->file_checksum, f->file_checksum_func_name,
f->unique_id, f->compensated_range_deletion_size,
f->tail_size);
f->tail_size, f->user_defined_timestamps_persisted);
ROCKS_LOG_WARN(immutable_db_options_.info_log,
"[%s] Moving #%" PRIu64
" from from_level-%d to from_level-%d %" PRIu64
@@ -1689,14 +1689,15 @@ Status DBImpl::WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
constexpr int level = 0;

if (s.ok() && has_output) {
edit->AddFile(
level, meta.fd.GetNumber(), meta.fd.GetPathId(), meta.fd.GetFileSize(),
meta.smallest, meta.largest, meta.fd.smallest_seqno,
meta.fd.largest_seqno, meta.marked_for_compaction, meta.temperature,
meta.oldest_blob_file_number, meta.oldest_ancester_time,
meta.file_creation_time, meta.epoch_number, meta.file_checksum,
meta.file_checksum_func_name, meta.unique_id,
meta.compensated_range_deletion_size, meta.tail_size);
edit->AddFile(level, meta.fd.GetNumber(), meta.fd.GetPathId(),
meta.fd.GetFileSize(), meta.smallest, meta.largest,
meta.fd.smallest_seqno, meta.fd.largest_seqno,
meta.marked_for_compaction, meta.temperature,
meta.oldest_blob_file_number, meta.oldest_ancester_time,
meta.file_creation_time, meta.epoch_number,
meta.file_checksum, meta.file_checksum_func_name,
meta.unique_id, meta.compensated_range_deletion_size,
meta.tail_size, meta.user_defined_timestamps_persisted);

for (const auto& blob : blob_file_additions) {
edit->AddBlobFile(blob);
3 changes: 2 additions & 1 deletion db/experimental.cc
@@ -102,7 +102,8 @@ Status UpdateManifestForFilesState(
lf->oldest_blob_file_number, lf->oldest_ancester_time,
lf->file_creation_time, lf->epoch_number, lf->file_checksum,
lf->file_checksum_func_name, lf->unique_id,
lf->compensated_range_deletion_size, lf->tail_size);
lf->compensated_range_deletion_size, lf->tail_size,
lf->user_defined_timestamps_persisted);
}
}
} else {
7 changes: 6 additions & 1 deletion db/external_sst_file_ingestion_job.cc
@@ -482,7 +482,9 @@ Status ExternalSstFileIngestionJob::Run() {
ingestion_options_.ingest_behind
? kReservedEpochNumberForFileIngestedBehind
: cfd_->NewEpochNumber(),
f.file_checksum, f.file_checksum_func_name, f.unique_id, 0, tail_size);
f.file_checksum, f.file_checksum_func_name, f.unique_id, 0, tail_size,
static_cast<bool>(
f.table_properties.user_defined_timestamps_persisted));
f_metadata.temperature = f.file_temperature;
edit_.AddFile(f.picked_level, f_metadata);
}
@@ -684,6 +686,9 @@ Status ExternalSstFileIngestionJob::GetIngestedFileInfo(
sst_file_reader.reset(new RandomAccessFileReader(
std::move(sst_file), external_file, nullptr /*Env*/, io_tracer_));

// TODO(yuzhangyu): User-defined timestamps doesn't support external sst file
// ingestion. Pass in the correct `user_defined_timestamps_persisted` flag
// for creating `TableReaderOptions` when the support is there.
status = cfd_->ioptions()->table_factory->NewTableReader(
TableReaderOptions(
*cfd_->ioptions(), sv->mutable_cf_options.prefix_extractor,
2 changes: 1 addition & 1 deletion db/flush_job.cc
@@ -1011,7 +1011,7 @@ Status FlushJob::WriteLevel0Table() {
meta_.file_creation_time, meta_.epoch_number,
meta_.file_checksum, meta_.file_checksum_func_name,
meta_.unique_id, meta_.compensated_range_deletion_size,
meta_.tail_size);
meta_.tail_size, meta_.user_defined_timestamps_persisted);
edit_->SetBlobFileAdditions(std::move(blob_file_additions));
}
// Piggyback FlushJobInfo on the first first flushed memtable.
7 changes: 6 additions & 1 deletion db/import_column_family_job.cc
@@ -211,7 +211,9 @@ Status ImportColumnFamilyJob::Run() {
file_metadata.temperature, kInvalidBlobFileNumber,
oldest_ancester_time, current_time, file_metadata.epoch_number,
kUnknownFileChecksum, kUnknownFileChecksumFuncName, f.unique_id, 0,
tail_size);
tail_size,
static_cast<bool>(
f.table_properties.user_defined_timestamps_persisted));
s = dummy_version_builder.Apply(&dummy_version_edit);
}
}
@@ -318,6 +320,9 @@ Status ImportColumnFamilyJob::GetIngestedFileInfo(
sst_file_reader.reset(new RandomAccessFileReader(
std::move(sst_file), external_file, nullptr /*Env*/, io_tracer_));

// TODO(yuzhangyu): User-defined timestamps doesn't support importing column
// family. Pass in the correct `user_defined_timestamps_persisted` flag for
// creating `TableReaderOptions` when the support is there.
status = cfd_->ioptions()->table_factory->NewTableReader(
TableReaderOptions(
*cfd_->ioptions(), sv->mutable_cf_options.prefix_extractor,
5 changes: 4 additions & 1 deletion db/repair.cc
@@ -560,6 +560,8 @@ class Repairer {
AddColumnFamily(props->column_family_name, t->column_family_id);
}
t->meta.oldest_ancester_time = props->creation_time;
t->meta.user_defined_timestamps_persisted =
static_cast<bool>(props->user_defined_timestamps_persisted);
}
if (status.ok()) {
uint64_t tail_size = 0;
@@ -703,7 +705,8 @@ class Repairer {
table->meta.oldest_ancester_time, table->meta.file_creation_time,
table->meta.epoch_number, table->meta.file_checksum,
table->meta.file_checksum_func_name, table->meta.unique_id,
table->meta.compensated_range_deletion_size, table->meta.tail_size);
table->meta.compensated_range_deletion_size, table->meta.tail_size,
table->meta.user_defined_timestamps_persisted);
}
s = dummy_version_builder.Apply(&dummy_edit);
if (s.ok()) {
3 changes: 2 additions & 1 deletion db/table_cache.cc
@@ -146,7 +146,8 @@ Status TableCache::GetTableReader(
false /* force_direct_prefetch */, level, block_cache_tracer_,
max_file_size_for_l0_meta_pin, db_session_id_,
file_meta.fd.GetNumber(), expected_unique_id,
file_meta.fd.largest_seqno, file_meta.tail_size),
file_meta.fd.largest_seqno, file_meta.tail_size,
file_meta.user_defined_timestamps_persisted),
std::move(file_reader), file_meta.fd.GetFileSize(), table_reader,
prefetch_index_and_filter_in_cache);
TEST_SYNC_POINT("TableCache::GetTableReader:0");