Fail DB::Open() when the requested compression is not available
Summary:
Currently, RocksDB silently ignores the unsupported compression setting and doesn't compress the data. Based on discussion, we agree that this is pretty bad because it can cause confusion for our users.

This patch fails DB::Open() if we don't support the compression that is specified in the options.
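For illustration (not part of this commit), a minimal sketch of the new behavior from the application side; the database path is made up, and LZ4 is just an example of a compression type that may not be compiled into the binary:

```cpp
#include <iostream>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Request a compression library that may not be linked into this build.
  options.compression = rocksdb::kLZ4Compression;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/compression_check_db", &db);
  if (!s.ok()) {
    // With this patch, Open() returns InvalidArgument ("Compression type LZ4
    // is not linked with the binary.") instead of silently writing
    // uncompressed data.
    std::cerr << "Open failed: " << s.ToString() << std::endl;
    return 1;
  }
  delete db;
  return 0;
}
```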

Test Plan: make check with LZ4 not present. If Snappy is not present, all tests will simply fail because Snappy is our default library. We should make Snappy a requirement, since without it our default DB::Open() fails.

Reviewers: sdong, MarkCallaghan, rven, yhchiang

Reviewed By: yhchiang

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D39687
igorcanadi committed Jun 18, 2015
1 parent 69bb210 commit 760e9a9
Showing 9 changed files with 132 additions and 75 deletions.
1 change: 1 addition & 0 deletions HISTORY.md
@@ -19,6 +19,7 @@
* DB::CompactRange() now accepts CompactRangeOptions instead of multiple parameters. CompactRangeOptions is defined in include/rocksdb/options.h.
* Add force_bottommost_level_compaction option to CompactRangeOptions, which prevents compaction from skipping the bottommost level.
* Add Cache.GetPinnedUsage() to get the size of memory occupied by entries that are in use by the system.
* DB::Open() will fail if the compression specified in Options is not linked with the binary. If you see this failure, recompile RocksDB with the compression libraries present on your system. Also, the default compression has changed: previously it was always snappy; now it is snappy only if snappy is available on the system, and kNoCompression otherwise.

## 3.11.0 (5/19/2015)
### New Features
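To illustrate the last HISTORY.md entry above (not part of the diff): after this change, a default-constructed ColumnFamilyOptions reflects whether snappy was linked in at build time. A minimal sketch:

```cpp
#include <iostream>

#include "rocksdb/options.h"

int main() {
  rocksdb::ColumnFamilyOptions cf_options;
  // With this commit, the constructor picks snappy only when the library is
  // linked in; otherwise it falls back to kNoCompression.
  if (cf_options.compression == rocksdb::kSnappyCompression) {
    std::cout << "snappy is linked in; default compression is snappy\n";
  } else {
    std::cout << "snappy is not linked in; default compression is none\n";
  }
  return 0;
}
```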
41 changes: 23 additions & 18 deletions db/column_family.cc
@@ -29,6 +29,7 @@
#include "db/version_set.h"
#include "db/write_controller.h"
#include "util/autovector.h"
#include "util/compression.h"
#include "util/hash_skiplist_rep.h"
#include "util/options_helper.h"
#include "util/thread_status_util.h"
@@ -87,6 +88,28 @@ void GetIntTblPropCollectorFactory(
new InternalKeyPropertiesCollectorFactory);
}

Status CheckCompressionSupported(const ColumnFamilyOptions& cf_options) {
  if (!cf_options.compression_per_level.empty()) {
    for (size_t level = 0; level < cf_options.compression_per_level.size();
         ++level) {
      if (!CompressionTypeSupported(cf_options.compression_per_level[level])) {
        return Status::InvalidArgument(
            "Compression type " +
            CompressionTypeToString(cf_options.compression_per_level[level]) +
            " is not linked with the binary.");
      }
    }
  } else {
    if (!CompressionTypeSupported(cf_options.compression)) {
      return Status::InvalidArgument(
          "Compression type " +
          CompressionTypeToString(cf_options.compression) +
          " is not linked with the binary.");
    }
  }
  return Status::OK();
}

ColumnFamilyOptions SanitizeOptions(const DBOptions& db_options,
const InternalKeyComparator* icmp,
const ColumnFamilyOptions& src) {
@@ -141,24 +164,6 @@ ColumnFamilyOptions SanitizeOptions(const DBOptions& db_options,
}
}

  if (!src.compression_per_level.empty()) {
    for (size_t level = 0; level < src.compression_per_level.size(); ++level) {
      if (!CompressionTypeSupported(src.compression_per_level[level])) {
        Log(InfoLogLevel::WARN_LEVEL, db_options.info_log,
            "Compression type chosen for level %zu is not supported: %s. "
            "RocksDB "
            "will not compress data on level %zu.",
            level, CompressionTypeToString(src.compression_per_level[level]),
            level);
      }
    }
  } else if (!CompressionTypeSupported(src.compression)) {
    Log(InfoLogLevel::WARN_LEVEL, db_options.info_log,
        "Compression type chosen is not supported: %s. RocksDB will not "
        "compress data.",
        CompressionTypeToString(src.compression));
  }

  if (result.compaction_style == kCompactionStyleFIFO) {
    result.num_levels = 1;
    // since we delete level0 files in FIFO compaction when there are too many
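As an illustration of the per-level branch in CheckCompressionSupported() above (not part of the diff; the helper name, path handling, and level layout here are made up for the example), one unsupported entry anywhere in compression_per_level is now enough for DB::Open() to return InvalidArgument:

```cpp
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

rocksdb::Status OpenWithPerLevelCompression(const std::string& path,
                                            rocksdb::DB** db) {
  rocksdb::Options options;
  options.create_if_missing = true;
  // If, say, Zlib is not linked into the binary, the third entry makes the
  // new check fail and DB::Open() returns the error instead of a DB handle.
  options.compression_per_level = {rocksdb::kNoCompression,
                                   rocksdb::kSnappyCompression,
                                   rocksdb::kZlibCompression};
  return rocksdb::DB::Open(options, path, db);
}
```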
2 changes: 2 additions & 0 deletions db/column_family.h
@@ -130,6 +130,8 @@ struct SuperVersion {
autovector<MemTable*> to_delete;
};

extern Status CheckCompressionSupported(const ColumnFamilyOptions& cf_options);

extern ColumnFamilyOptions SanitizeOptions(const DBOptions& db_options,
const InternalKeyComparator* icmp,
const ColumnFamilyOptions& src);
22 changes: 16 additions & 6 deletions db/db_impl.cc
@@ -2918,6 +2918,12 @@ Status DBImpl::CreateColumnFamily(const ColumnFamilyOptions& cf_options,
                                  ColumnFamilyHandle** handle) {
  Status s;
  *handle = nullptr;

  s = CheckCompressionSupported(cf_options);
  if (!s.ok()) {
    return s;
  }

  {
    InstrumentedMutexLock l(&mutex_);

@@ -4154,20 +4160,24 @@ Status DB::Open(const DBOptions& db_options, const std::string& dbname,
    return s;
  }

  if (db_options.db_paths.size() > 1) {
    for (auto& cfd : column_families) {
  for (auto& cfd : column_families) {
    s = CheckCompressionSupported(cfd.options);
    if (!s.ok()) {
      return s;
    }
    if (db_options.db_paths.size() > 1) {
      if ((cfd.options.compaction_style != kCompactionStyleUniversal) &&
          (cfd.options.compaction_style != kCompactionStyleLevel)) {
        return Status::NotSupported(
            "More than one DB paths are only supported in "
            "universal and level compaction styles. ");
      }
    }
  }

    if (db_options.db_paths.size() > 4) {
      return Status::NotSupported(
          "More than four DB paths are not supported yet. ");
    }
  if (db_options.db_paths.size() > 4) {
    return Status::NotSupported(
        "More than four DB paths are not supported yet. ");
  }

  *dbptr = nullptr;
29 changes: 29 additions & 0 deletions db/db_test.cc
@@ -11354,6 +11354,9 @@ TEST_F(DBTest, FlushOnDestroy) {
}

TEST_F(DBTest, DynamicLevelMaxBytesBase) {
  if (!Snappy_Supported() || !LZ4_Supported()) {
    return;
  }
  // Use InMemoryEnv, or it would be too slow.
  unique_ptr<Env> env(new MockEnv(env_));

@@ -11925,6 +11928,9 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
}

TEST_F(DBTest, DynamicLevelCompressionPerLevel2) {
  if (!Snappy_Supported() || !LZ4_Supported() || !Zlib_Supported()) {
    return;
  }
  const int kNKeys = 500;
  int keys[kNKeys];
  for (int i = 0; i < kNKeys; i++) {
@@ -12707,6 +12713,9 @@ TEST_F(DBTest, EncodeDecompressedBlockSizeTest) {
  CompressionType compressions[] = {kZlibCompression, kBZip2Compression,
                                    kLZ4Compression, kLZ4HCCompression};
  for (int iter = 0; iter < 4; ++iter) {
    if (!CompressionTypeSupported(compressions[iter])) {
      continue;
    }
    // first_table_version 1 -- generate with table_version == 1, read with
    // table_version == 2
    // first_table_version 2 -- generate with table_version == 2, read with
@@ -13859,6 +13868,26 @@ TEST_F(DBTest, ForceBottommostLevelCompaction) {
rocksdb::SyncPoint::GetInstance()->DisableProcessing();
}

TEST_F(DBTest, FailWhenCompressionNotSupportedTest) {
  CompressionType compressions[] = {kZlibCompression, kBZip2Compression,
                                    kLZ4Compression, kLZ4HCCompression};
  for (int iter = 0; iter < 4; ++iter) {
    if (!CompressionTypeSupported(compressions[iter])) {
      // not supported, we should fail the Open()
      Options options = CurrentOptions();
      options.compression = compressions[iter];
      ASSERT_TRUE(!TryReopen(options).ok());
      // Check that CreateColumnFamily also fails
      options.compression = kNoCompression;
      ASSERT_OK(TryReopen(options));
      ColumnFamilyOptions cf_options(options);
      cf_options.compression = compressions[iter];
      ColumnFamilyHandle* handle;
      ASSERT_TRUE(!db_->CreateColumnFamily(cf_options, "name", &handle).ok());
    }
  }
}

} // namespace rocksdb

int main(int argc, char** argv) {
10 changes: 2 additions & 8 deletions include/rocksdb/options.h
@@ -55,12 +55,6 @@ enum CompressionType : char {
kBZip2Compression = 0x3, kLZ4Compression = 0x4, kLZ4HCCompression = 0x5
};

// returns true if RocksDB was correctly linked with compression library and
// supports the compression type
extern bool CompressionTypeSupported(CompressionType compression_type);
// Returns a human-readable name of the compression type
extern const char* CompressionTypeToString(CompressionType compression_type);

enum CompactionStyle : char {
// level based compaction style
kCompactionStyleLevel = 0x0,
@@ -260,8 +254,8 @@ struct ColumnFamilyOptions {
// Compress blocks using the specified compression algorithm. This
// parameter can be changed dynamically.
//
// Default: kSnappyCompression, which gives lightweight but fast
// compression.
// Default: kSnappyCompression, if it's supported. If snappy is not linked
// with the library, the default is kNoCompression.
//
// Typical speeds of kSnappyCompression on an Intel(R) Core(TM)2 2.4GHz:
// ~200-500MB/s compression
40 changes: 40 additions & 0 deletions util/compression.h
@@ -62,6 +62,46 @@ inline bool LZ4_Supported() {
  return false;
}

inline bool CompressionTypeSupported(CompressionType compression_type) {
  switch (compression_type) {
    case kNoCompression:
      return true;
    case kSnappyCompression:
      return Snappy_Supported();
    case kZlibCompression:
      return Zlib_Supported();
    case kBZip2Compression:
      return BZip2_Supported();
    case kLZ4Compression:
      return LZ4_Supported();
    case kLZ4HCCompression:
      return LZ4_Supported();
    default:
      assert(false);
      return false;
  }
}

inline std::string CompressionTypeToString(CompressionType compression_type) {
  switch (compression_type) {
    case kNoCompression:
      return "NoCompression";
    case kSnappyCompression:
      return "Snappy";
    case kZlibCompression:
      return "Zlib";
    case kBZip2Compression:
      return "BZip2";
    case kLZ4Compression:
      return "LZ4";
    case kLZ4HCCompression:
      return "LZ4HC";
    default:
      assert(false);
      return "";
  }
}

// compress_format_version can have two values:
// 1 -- decompressed sizes for BZip2 and Zlib are not included in the compressed
// block. Also, decompressed sizes for LZ4 are encoded in platform-dependent
46 changes: 3 additions & 43 deletions util/options.cc
@@ -84,7 +84,7 @@ ColumnFamilyOptions::ColumnFamilyOptions()
      max_write_buffer_number(2),
      min_write_buffer_number_to_merge(1),
      max_write_buffer_number_to_maintain(0),
      compression(kSnappyCompression),
      compression(Snappy_Supported() ? kSnappyCompression : kNoCompression),
      prefix_extractor(nullptr),
      num_levels(7),
      level0_file_num_compaction_trigger(4),
@@ -380,11 +380,11 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
  if (!compression_per_level.empty()) {
    for (unsigned int i = 0; i < compression_per_level.size(); i++) {
      Warn(log, " Options.compression[%d]: %s", i,
           CompressionTypeToString(compression_per_level[i]));
           CompressionTypeToString(compression_per_level[i]).c_str());
    }
  } else {
    Warn(log, " Options.compression: %s",
         CompressionTypeToString(compression));
         CompressionTypeToString(compression).c_str());
  }
  Warn(log, " Options.prefix_extractor: %s",
       prefix_extractor == nullptr ? "nullptr" : prefix_extractor->Name());
@@ -546,46 +546,6 @@ Options::PrepareForBulkLoad()
  return this;
}

const char* CompressionTypeToString(CompressionType compression_type) {
  switch (compression_type) {
    case kNoCompression:
      return "NoCompression";
    case kSnappyCompression:
      return "Snappy";
    case kZlibCompression:
      return "Zlib";
    case kBZip2Compression:
      return "BZip2";
    case kLZ4Compression:
      return "LZ4";
    case kLZ4HCCompression:
      return "LZ4HC";
    default:
      assert(false);
      return "";
  }
}

bool CompressionTypeSupported(CompressionType compression_type) {
  switch (compression_type) {
    case kNoCompression:
      return true;
    case kSnappyCompression:
      return Snappy_Supported();
    case kZlibCompression:
      return Zlib_Supported();
    case kBZip2Compression:
      return BZip2_Supported();
    case kLZ4Compression:
      return LZ4_Supported();
    case kLZ4HCCompression:
      return LZ4_Supported();
    default:
      assert(false);
      return false;
  }
}

#ifndef ROCKSDB_LITE
// Optimization functions
ColumnFamilyOptions* ColumnFamilyOptions::OptimizeForPointLookup(
16 changes: 16 additions & 0 deletions utilities/spatialdb/spatial_db_test.cc
@@ -8,6 +8,7 @@
#include <set>

#include "rocksdb/utilities/spatial_db.h"
#include "util/compression.h"
#include "util/testharness.h"
#include "util/testutil.h"
#include "util/random.h"
@@ -47,6 +48,9 @@ class SpatialDBTest : public testing::Test {
};

TEST_F(SpatialDBTest, FeatureSetSerializeTest) {
  if (!LZ4_Supported()) {
    return;
  }
  FeatureSet fs;

  fs.Set("a", std::string("b"));
@@ -94,6 +98,9 @@ TEST_F(SpatialDBTest, FeatureSetSerializeTest) {
}

TEST_F(SpatialDBTest, TestNextID) {
  if (!LZ4_Supported()) {
    return;
  }
  ASSERT_OK(SpatialDB::Create(
      SpatialDBOptions(), dbname_,
      {SpatialIndexOptions("simple", BoundingBox<double>(0, 0, 100, 100), 2)}));
@@ -117,6 +124,9 @@ TEST_F(SpatialDBTest, TestNextID) {
}

TEST_F(SpatialDBTest, FeatureSetTest) {
  if (!LZ4_Supported()) {
    return;
  }
  ASSERT_OK(SpatialDB::Create(
      SpatialDBOptions(), dbname_,
      {SpatialIndexOptions("simple", BoundingBox<double>(0, 0, 100, 100), 2)}));
@@ -151,6 +161,9 @@ TEST_F(SpatialDBTest, FeatureSetTest) {
}

TEST_F(SpatialDBTest, SimpleTest) {
  if (!LZ4_Supported()) {
    return;
  }
  // iter 0 -- not read only
  // iter 1 -- read only
  for (int iter = 0; iter < 2; ++iter) {
@@ -227,6 +240,9 @@ BoundingBox<double> ScaleBB(BoundingBox<int> b, double step) {
} // namespace

TEST_F(SpatialDBTest, RandomizedTest) {
  if (!LZ4_Supported()) {
    return;
  }
  Random rnd(301);
  std::vector<std::pair<std::string, BoundingBox<int>>> elements;

