Turn on -Wshorten-64-to-32 and fix all the errors
Summary:
We need to turn on -Wshorten-64-to-32 for mobile. See D1671432 (internal phabricator) for details.

This diff turns on the warning flag and fixes all the errors. There were also some interesting errors that I would call real bugs, especially in PlainTable. Going forward, I think it makes sense to keep this flag on and to be very careful when converting 64-bit values to 32-bit variables.
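(For context: -Wshorten-64-to-32 is a Clang diagnostic that flags implicit conversions from 64-bit integer types to 32-bit ones, most commonly size_t into int on 64-bit builds. Below is a minimal standalone sketch of what the warning catches and the kind of fix applied throughout this diff; the names are illustrative, not taken from the RocksDB sources.)

    #include <cstddef>
    #include <vector>

    // With -Wshorten-64-to-32 enabled, the implicit conversion below warns on
    // 64-bit targets, because std::vector<T>::size() returns a 64-bit size_t.
    int CountImplicit(const std::vector<int>& v) {
      return v.size();  // warning: implicit conversion loses integer precision
    }

    // The fix used in most of this diff: spell the narrowing out explicitly,
    // after checking that the value really fits in 32 bits.
    int CountExplicit(const std::vector<int>& v) {
      return static_cast<int>(v.size());
    }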

Test Plan: compiles

Reviewers: ljin, rven, yhchiang, sdong

Reviewed By: yhchiang

Subscribers: bobbaldwin, dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D28689
igorcanadi committed Nov 11, 2014
1 parent 113796c commit 767777c
Showing 106 changed files with 584 additions and 505 deletions.
1 change: 0 additions & 1 deletion Makefile
@@ -147,7 +147,6 @@ TESTS = \
cuckoo_table_builder_test \
cuckoo_table_reader_test \
cuckoo_table_db_test \
write_batch_with_index_test \
flush_job_test \
wal_manager_test \
listener_test \
8 changes: 8 additions & 0 deletions build_tools/build_detect_platform
@@ -284,6 +284,14 @@ EOF
fi
fi

# Test whether -Wshorten-64-to-32 is available
$CXX $CFLAGS -x c++ - -o /dev/null -Wshorten-64-to-32 2>/dev/null <<EOF
int main() {}
EOF
if [ "$?" = 0 ]; then
COMMON_FLAGS="$COMMON_FLAGS -Wshorten-64-to-32"
fi

# shall we use HDFS?

if test "$USE_HDFS"; then
12 changes: 5 additions & 7 deletions db/c.cc
@@ -385,11 +385,9 @@ struct rocksdb_mergeoperator_t : public MergeOperator {
unsigned char success;
size_t new_value_len;
char* tmp_new_value = (*full_merge_)(
state_,
key.data(), key.size(),
existing_value_data, existing_value_len,
&operand_pointers[0], &operand_sizes[0], n,
&success, &new_value_len);
state_, key.data(), key.size(), existing_value_data, existing_value_len,
&operand_pointers[0], &operand_sizes[0], static_cast<int>(n), &success,
&new_value_len);
new_value->assign(tmp_new_value, new_value_len);

if (delete_value_ != nullptr) {
@@ -417,7 +415,7 @@ struct rocksdb_mergeoperator_t : public MergeOperator {
size_t new_value_len;
char* tmp_new_value = (*partial_merge_)(
state_, key.data(), key.size(), &operand_pointers[0], &operand_sizes[0],
operand_count, &success, &new_value_len);
static_cast<int>(operand_count), &success, &new_value_len);
new_value->assign(tmp_new_value, new_value_len);

if (delete_value_ != nullptr) {
@@ -2041,7 +2039,7 @@ void rocksdb_options_set_min_level_to_compress(rocksdb_options_t* opt, int level

int rocksdb_livefiles_count(
const rocksdb_livefiles_t* lf) {
return lf->rep.size();
return static_cast<int>(lf->rep.size());
}

const char* rocksdb_livefiles_name(
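The c.cc changes above apply the same rule at the C API boundary: the merge-operator callbacks take their operand counts as plain int, so the size_t values coming from the C++ side are narrowed with an explicit cast. A minimal sketch of the shape of such a boundary, with a made-up callback type rather than the real rocksdb C API:

    #include <cstddef>
    #include <vector>

    // Hypothetical C-style callback that, like the callbacks above, takes the
    // number of operands as an int.
    typedef void (*merge_cb_t)(const char* const* operands, int num_operands);

    void CallAcrossBoundary(merge_cb_t cb,
                            const std::vector<const char*>& operands) {
      // operands.size() is a 64-bit size_t; the callback expects a 32-bit int,
      // so the narrowing is made explicit instead of being left implicit.
      cb(operands.data(), static_cast<int>(operands.size()));
    }
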
2 changes: 1 addition & 1 deletion db/c_test.c
@@ -132,7 +132,7 @@ static void CmpDestroy(void* arg) { }

static int CmpCompare(void* arg, const char* a, size_t alen,
const char* b, size_t blen) {
int n = (alen < blen) ? alen : blen;
size_t n = (alen < blen) ? alen : blen;
int r = memcmp(a, b, n);
if (r == 0) {
if (alen < blen) r = -1;
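Not every fix is a cast. In c_test.c the local n is widened to size_t instead, since memcmp takes a size_t anyway and the smaller of the two lengths needs no narrowing. A minimal standalone version of that alternative, using only the standard library:

    #include <cstddef>
    #include <cstring>

    // Preferred where possible: give the local the wider type rather than
    // narrowing it, so nothing can be lost and no cast is needed.
    int CompareSpans(const char* a, size_t alen, const char* b, size_t blen) {
      size_t n = (alen < blen) ? alen : blen;  // was: int n = ...
      int r = memcmp(a, b, n);
      if (r == 0) {
        if (alen < blen) r = -1;
        else if (alen > blen) r = +1;
      }
      return r;
    }
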
12 changes: 6 additions & 6 deletions db/column_family_test.cc
@@ -133,7 +133,7 @@ class ColumnFamilyTest {
void CreateColumnFamilies(
const std::vector<std::string>& cfs,
const std::vector<ColumnFamilyOptions> options = {}) {
int cfi = handles_.size();
int cfi = static_cast<int>(handles_.size());
handles_.resize(cfi + cfs.size());
names_.resize(cfi + cfs.size());
for (size_t i = 0; i < cfs.size(); ++i) {
@@ -231,7 +231,7 @@ class ColumnFamilyTest {
snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
result += buf;
if (f > 0) {
last_non_zero_offset = result.size();
last_non_zero_offset = static_cast<int>(result.size());
}
}
result.resize(last_non_zero_offset);
@@ -287,8 +287,8 @@ class ColumnFamilyTest {
assert(num_per_cf.size() == handles_.size());

for (size_t i = 0; i < num_per_cf.size(); ++i) {
ASSERT_EQ(num_per_cf[i],
GetProperty(i, "rocksdb.num-immutable-mem-table"));
ASSERT_EQ(num_per_cf[i], GetProperty(static_cast<int>(i),
"rocksdb.num-immutable-mem-table"));
}
}

@@ -916,11 +916,11 @@ TEST(ColumnFamilyTest, DontRollEmptyLogs) {
CreateColumnFamiliesAndReopen({"one", "two", "three", "four"});

for (size_t i = 0; i < handles_.size(); ++i) {
PutRandomData(i, 10, 100);
PutRandomData(static_cast<int>(i), 10, 100);
}
int num_writable_file_start = env_->GetNumberOfNewWritableFileCalls();
// this will trigger the flushes
for (size_t i = 0; i <= 4; ++i) {
for (int i = 0; i <= 4; ++i) {
ASSERT_OK(Flush(i));
}

14 changes: 7 additions & 7 deletions db/compaction.cc
@@ -124,9 +124,9 @@ Compaction::~Compaction() {

void Compaction::GenerateFileLevels() {
input_levels_.resize(num_input_levels());
for (int which = 0; which < num_input_levels(); which++) {
DoGenerateLevelFilesBrief(
&input_levels_[which], inputs_[which].files, &arena_);
for (size_t which = 0; which < num_input_levels(); which++) {
DoGenerateLevelFilesBrief(&input_levels_[which], inputs_[which].files,
&arena_);
}
}

@@ -144,7 +144,7 @@ bool Compaction::IsTrivialMove() const {
}

void Compaction::AddInputDeletions(VersionEdit* out_edit) {
for (int which = 0; which < num_input_levels(); which++) {
for (size_t which = 0; which < num_input_levels(); which++) {
for (size_t i = 0; i < inputs_[which].size(); i++) {
out_edit->DeleteFile(level(which), inputs_[which][i]->fd.GetNumber());
}
@@ -207,7 +207,7 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) {

// Mark (or clear) each file that is being compacted
void Compaction::MarkFilesBeingCompacted(bool mark_as_compacted) {
for (int i = 0; i < num_input_levels(); i++) {
for (size_t i = 0; i < num_input_levels(); i++) {
for (unsigned int j = 0; j < inputs_[i].size(); j++) {
assert(mark_as_compacted ? !inputs_[i][j]->being_compacted :
inputs_[i][j]->being_compacted);
@@ -293,7 +293,7 @@ void Compaction::Summary(char* output, int len) {
return;
}

for (int level_iter = 0; level_iter < num_input_levels(); ++level_iter) {
for (size_t level_iter = 0; level_iter < num_input_levels(); ++level_iter) {
if (level_iter > 0) {
write += snprintf(output + write, len - write, "], [");
if (write < 0 || write >= len) {
@@ -317,7 +317,7 @@ uint64_t Compaction::OutputFilePreallocationSize(
if (cfd_->ioptions()->compaction_style == kCompactionStyleLevel) {
preallocation_size = mutable_options.MaxFileSizeForLevel(output_level());
} else {
for (int level_iter = 0; level_iter < num_input_levels(); ++level_iter) {
for (size_t level_iter = 0; level_iter < num_input_levels(); ++level_iter) {
for (const auto& f : inputs_[level_iter].files) {
preallocation_size += f->fd.GetFileSize();
}
Expand Down
12 changes: 6 additions & 6 deletions db/compaction.h
@@ -23,7 +23,7 @@ struct CompactionInputFiles {
inline bool empty() const { return files.empty(); }
inline size_t size() const { return files.size(); }
inline void clear() { files.clear(); }
inline FileMetaData* operator[](int i) const { return files[i]; }
inline FileMetaData* operator[](size_t i) const { return files[i]; }
};

class Version;
@@ -48,15 +48,15 @@ class Compaction {

// Returns the level associated to the specified compaction input level.
// If compaction_input_level is not specified, then input_level is set to 0.
int level(int compaction_input_level = 0) const {
int level(size_t compaction_input_level = 0) const {
return inputs_[compaction_input_level].level;
}

// Outputs will go to this level
int output_level() const { return output_level_; }

// Returns the number of input levels in this compaction.
int num_input_levels() const { return inputs_.size(); }
size_t num_input_levels() const { return inputs_.size(); }

// Return the object that holds the edits to the descriptor done
// by this compaction.
@@ -66,7 +66,7 @@
// compaction input level.
// The function will return 0 if when "compaction_input_level" < 0
// or "compaction_input_level" >= "num_input_levels()".
int num_input_files(size_t compaction_input_level) const {
size_t num_input_files(size_t compaction_input_level) const {
if (compaction_input_level < inputs_.size()) {
return inputs_[compaction_input_level].size();
}
@@ -83,7 +83,7 @@
// specified compaction input level.
// REQUIREMENT: "compaction_input_level" must be >= 0 and
// < "input_levels()"
FileMetaData* input(size_t compaction_input_level, int i) const {
FileMetaData* input(size_t compaction_input_level, size_t i) const {
assert(compaction_input_level < inputs_.size());
return inputs_[compaction_input_level][i];
}
@@ -98,7 +98,7 @@
}

// Returns the LevelFilesBrief of the specified compaction input level.
LevelFilesBrief* input_levels(int compaction_input_level) {
LevelFilesBrief* input_levels(size_t compaction_input_level) {
return &input_levels_[compaction_input_level];
}

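compaction.h takes a third approach: fix the interface itself. Accessors that report container sizes (num_input_levels(), num_input_files()) now return size_t, and the loops over them in compaction.cc above and compaction_job.cc below switch their index type to match, so the call sites need no casts at all. A small sketch of the idea with a hypothetical class, not the real Compaction:

    #include <cstddef>
    #include <vector>

    // Hypothetical class: sizes are exposed as size_t so that callers can loop
    // with a size_t index and never narrow implicitly.
    class InputLevels {
     public:
      size_t num_levels() const { return levels_.size(); }
      size_t num_files(size_t level) const { return levels_[level].size(); }

     private:
      std::vector<std::vector<int>> levels_;
    };

    size_t TotalFiles(const InputLevels& in) {
      size_t total = 0;
      for (size_t which = 0; which < in.num_levels(); which++) {
        total += in.num_files(which);
      }
      return total;
    }
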
15 changes: 8 additions & 7 deletions db/compaction_job.cc
@@ -415,32 +415,33 @@ Status CompactionJob::Run() {
}

compaction_stats_.micros = env_->NowMicros() - start_micros - imm_micros;
compaction_stats_.files_in_leveln = compact_->compaction->num_input_files(0);
compaction_stats_.files_in_leveln =
static_cast<int>(compact_->compaction->num_input_files(0));
compaction_stats_.files_in_levelnp1 =
compact_->compaction->num_input_files(1);
static_cast<int>(compact_->compaction->num_input_files(1));
MeasureTime(stats_, COMPACTION_TIME, compaction_stats_.micros);

int num_output_files = compact_->outputs.size();
size_t num_output_files = compact_->outputs.size();
if (compact_->builder != nullptr) {
// An error occurred so ignore the last output.
assert(num_output_files > 0);
--num_output_files;
}
compaction_stats_.files_out_levelnp1 = num_output_files;
compaction_stats_.files_out_levelnp1 = static_cast<int>(num_output_files);

for (int i = 0; i < compact_->compaction->num_input_files(0); i++) {
for (size_t i = 0; i < compact_->compaction->num_input_files(0); i++) {
compaction_stats_.bytes_readn +=
compact_->compaction->input(0, i)->fd.GetFileSize();
compaction_stats_.num_input_records +=
static_cast<uint64_t>(compact_->compaction->input(0, i)->num_entries);
}

for (int i = 0; i < compact_->compaction->num_input_files(1); i++) {
for (size_t i = 0; i < compact_->compaction->num_input_files(1); i++) {
compaction_stats_.bytes_readnp1 +=
compact_->compaction->input(1, i)->fd.GetFileSize();
}

for (int i = 0; i < num_output_files; i++) {
for (size_t i = 0; i < num_output_files; i++) {
compaction_stats_.bytes_written += compact_->outputs[i].file_size;
}
if (compact_->num_input_records > compact_->num_output_records) {
10 changes: 5 additions & 5 deletions db/compaction_picker.cc
@@ -46,7 +46,7 @@ CompressionType GetCompressionType(
// If the use has specified a different compression level for each level,
// then pick the compression for that level.
if (!ioptions.compression_per_level.empty()) {
const int n = ioptions.compression_per_level.size() - 1;
const int n = static_cast<int>(ioptions.compression_per_level.size()) - 1;
// It is possible for level_ to be -1; in that case, we use level
// 0's compression. This occurs mostly in backwards compatibility
// situations when the builder doesn't know what level the file
@@ -75,7 +75,7 @@ void CompactionPicker::SizeBeingCompacted(std::vector<uint64_t>& sizes) {
uint64_t total = 0;
for (auto c : compactions_in_progress_[level]) {
assert(c->level() == level);
for (int i = 0; i < c->num_input_files(0); i++) {
for (size_t i = 0; i < c->num_input_files(0); i++) {
total += c->input(0, i)->compensated_file_size;
}
}
@@ -870,7 +870,8 @@ Compaction* UniversalCompactionPicker::PickCompaction(
// If max read amplification is exceeding configured limits, then force
// compaction without looking at filesize ratios and try to reduce
// the number of files to fewer than level0_file_num_compaction_trigger.
unsigned int num_files = level_files.size() -
unsigned int num_files =
static_cast<unsigned int>(level_files.size()) -
mutable_cf_options.level0_file_num_compaction_trigger;
if ((c = PickCompactionUniversalReadAmp(
cf_name, mutable_cf_options, vstorage, score, UINT_MAX,
@@ -1074,8 +1075,7 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
if (ratio_to_compress >= 0) {
uint64_t total_size = vstorage->NumLevelBytes(kLevel0);
uint64_t older_file_size = 0;
for (unsigned int i = files.size() - 1;
i >= first_index_after; i--) {
for (size_t i = files.size() - 1; i >= first_index_after; i--) {
older_file_size += files[i]->fd.GetFileSize();
if (older_file_size * 100L >= total_size * (long) ratio_to_compress) {
enable_compression = false;
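One subtlety in the PickCompactionUniversalReadAmp hunk above: the descending loop index was already unsigned (unsigned int) and stays unsigned as size_t, and a descending unsigned index wraps around instead of going negative, so the loop condition there relies on first_index_after being at least 1. A minimal sketch of a wrap-safe way to write a descending loop over an unsigned index, on hypothetical data rather than the RocksDB structures:

    #include <cstddef>
    #include <vector>

    // Wrap-safe descending iteration with an unsigned index: test the bound
    // before decrementing, so the index is never asked to go below zero.
    long long SumTail(const std::vector<int>& v, size_t stop_index) {
      long long sum = 0;
      for (size_t i = v.size(); i > stop_index; ) {
        --i;  // i now names a valid element in [stop_index, v.size())
        sum += v[i];
      }
      return sum;
    }
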
10 changes: 5 additions & 5 deletions db/compaction_picker_test.cc
@@ -109,7 +109,7 @@ TEST(CompactionPickerTest, Level0Trigger) {
std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
cf_name, mutable_cf_options, &vstorage, &log_buffer));
ASSERT_TRUE(compaction.get() != nullptr);
ASSERT_EQ(2, compaction->num_input_files(0));
ASSERT_EQ(2U, compaction->num_input_files(0));
ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
}
Expand All @@ -121,7 +121,7 @@ TEST(CompactionPickerTest, Level1Trigger) {
std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
cf_name, mutable_cf_options, &vstorage, &log_buffer));
ASSERT_TRUE(compaction.get() != nullptr);
ASSERT_EQ(1, compaction->num_input_files(0));
ASSERT_EQ(1U, compaction->num_input_files(0));
ASSERT_EQ(66U, compaction->input(0, 0)->fd.GetNumber());
}

@@ -136,8 +136,8 @@ TEST(CompactionPickerTest, Level1Trigger2) {
std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
cf_name, mutable_cf_options, &vstorage, &log_buffer));
ASSERT_TRUE(compaction.get() != nullptr);
ASSERT_EQ(1, compaction->num_input_files(0));
ASSERT_EQ(2, compaction->num_input_files(1));
ASSERT_EQ(1U, compaction->num_input_files(0));
ASSERT_EQ(2U, compaction->num_input_files(1));
ASSERT_EQ(66U, compaction->input(0, 0)->fd.GetNumber());
ASSERT_EQ(6U, compaction->input(1, 0)->fd.GetNumber());
ASSERT_EQ(7U, compaction->input(1, 1)->fd.GetNumber());
@@ -164,7 +164,7 @@ TEST(CompactionPickerTest, LevelMaxScore) {
std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
cf_name, mutable_cf_options, &vstorage, &log_buffer));
ASSERT_TRUE(compaction.get() != nullptr);
ASSERT_EQ(1, compaction->num_input_files(0));
ASSERT_EQ(1U, compaction->num_input_files(0));
ASSERT_EQ(7U, compaction->input(0, 0)->fd.GetNumber());
}

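The test updates above follow from the signature change: num_input_files() now returns size_t, so the expected values gain a U suffix to keep both sides of ASSERT_EQ unsigned. The same idea with a plain assert, independent of gtest:

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> files = {1, 2};
      // files.size() is size_t; writing the expected count as an unsigned
      // literal keeps the equality comparison between two unsigned types.
      assert(files.size() == 2U);
      return 0;
    }
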
8 changes: 4 additions & 4 deletions db/comparator_db_test.cc
@@ -82,7 +82,7 @@ void DoRandomIteraratorTest(DB* db, std::vector<std::string> source_strings,
}

int type = rnd->Uniform(2);
int index = rnd->Uniform(source_strings.size());
int index = rnd->Uniform(static_cast<int>(source_strings.size()));
auto& key = source_strings[index];
switch (type) {
case 0:
@@ -124,7 +124,7 @@ void DoRandomIteraratorTest(DB* db, std::vector<std::string> source_strings,
break;
case 2: {
// Seek to random key
auto key_idx = rnd->Uniform(source_strings.size());
auto key_idx = rnd->Uniform(static_cast<int>(source_strings.size()));
auto key = source_strings[key_idx];
iter->Seek(key);
result_iter->Seek(key);
@@ -150,7 +150,7 @@ void DoRandomIteraratorTest(DB* db, std::vector<std::string> source_strings,
break;
default: {
assert(type == 5);
auto key_idx = rnd->Uniform(source_strings.size());
auto key_idx = rnd->Uniform(static_cast<int>(source_strings.size()));
auto key = source_strings[key_idx];
std::string result;
auto status = db->Get(ReadOptions(), key, &result);
@@ -325,7 +325,7 @@ TEST(ComparatorDBTest, SimpleSuffixReverseComparator) {
source_prefixes.push_back(test::RandomHumanReadableString(&rnd, 8));
}
for (int j = 0; j < 20; j++) {
int prefix_index = rnd.Uniform(source_prefixes.size());
int prefix_index = rnd.Uniform(static_cast<int>(source_prefixes.size()));
std::string key = source_prefixes[prefix_index] +
test::RandomHumanReadableString(&rnd, rnd.Uniform(8));
source_strings.push_back(key);
