Skip to content

Commit

Permalink
Enable MS compiler warning c4244.
Browse files Browse the repository at this point in the history
  Mostly due to differences in the sizes of int and long on
  64-bit Windows (MSVC) versus GNU toolchains.
  • Loading branch information
yuslepukhin committed Dec 12, 2015
1 parent 84f9879 commit 236fe21
Show file tree
Hide file tree
Showing 27 changed files with 160 additions and 127 deletions.
4 changes: 2 additions & 2 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -59,8 +59,8 @@ add_custom_command(OUTPUT ${BUILD_VERSION_CC}
add_custom_target(GenerateBuildVersion DEPENDS ${BUILD_VERSION_CC})

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zi /nologo /EHsc /GS /Gd /GR /GF /fp:precise /Zc:wchar_t /Zc:forScope /errorReport:queue")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W3 /WX /wd4127 /wd4267 /wd4800 /wd4996")

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W3 /WX /wd4127 /wd4244 /wd4800 /wd4996")

# Used to run CI build and tests so we can run faster
set(OPTIMIZE_DEBUG_DEFAULT 0) # Debug build is unoptimized by default use -DOPTDBG=1 to optimize
Expand Down Expand Up @@ -244,7 +244,7 @@ set(SOURCES
utilities/document/document_db.cc
utilities/document/json_document.cc
utilities/document/json_document_builder.cc
utilities/env_mirror.cc
utilities/env_mirror.cc
utilities/flashcache/flashcache.cc
utilities/geodb/geodb_impl.cc
utilities/leveldb_options/leveldb_options.cc
Expand Down
2 changes: 1 addition & 1 deletion db/compaction.cc
Original file line number Diff line number Diff line change
Expand Up @@ -428,7 +428,7 @@ uint64_t Compaction::OutputFilePreallocationSize() {
}
// Over-estimate slightly so we don't end up just barely crossing
// the threshold
return preallocation_size * 1.1;
return preallocation_size + (preallocation_size / 10);
}

std::unique_ptr<CompactionFilter> Compaction::CreateCompactionFilter() const {
Expand Down
9 changes: 6 additions & 3 deletions db/compaction_job.cc
Original file line number Diff line number Diff line change
Expand Up @@ -411,9 +411,12 @@ void CompactionJob::GenSubcompactionBoundaries() {

// Group the ranges into subcompactions
const double min_file_fill_percent = 4.0 / 5;
uint64_t max_output_files = std::ceil(
sum / min_file_fill_percent /
cfd->GetCurrentMutableCFOptions()->MaxFileSizeForLevel(out_lvl));
uint64_t max_output_files =
static_cast<uint64_t>(
std::ceil(
sum / min_file_fill_percent /
cfd->GetCurrentMutableCFOptions()->MaxFileSizeForLevel(out_lvl))
);
uint64_t subcompactions =
std::min({static_cast<uint64_t>(ranges.size()),
static_cast<uint64_t>(db_options_.max_subcompactions),
Expand Down
4 changes: 2 additions & 2 deletions db/compaction_job_stats_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -551,8 +551,8 @@ uint64_t EstimatedFileSize(
const size_t kFooterSize = 512;

uint64_t data_size =
num_records * (key_size + value_size * compression_ratio +
kPerKeyOverhead);
static_cast<uint64_t>(num_records * (key_size +
value_size * compression_ratio + kPerKeyOverhead));

return data_size + kFooterSize
+ num_records * bloom_bits_per_key / 8 // filter block
Expand Down
12 changes: 7 additions & 5 deletions db/corruption_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@

#include "rocksdb/db.h"

#include <inttypes.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/stat.h>
Expand Down Expand Up @@ -104,8 +105,8 @@ class CorruptionTest : public testing::Test {
}

void Check(int min_expected, int max_expected) {
unsigned int next_expected = 0;
int missed = 0;
uint64_t next_expected = 0;
uint64_t missed = 0;
int bad_keys = 0;
int bad_values = 0;
int correct = 0;
Expand All @@ -126,7 +127,7 @@ class CorruptionTest : public testing::Test {
continue;
}
missed += (key - next_expected);
next_expected = static_cast<unsigned int>(key + 1);
next_expected = key + 1;
if (iter->value() != Value(static_cast<int>(key), &value_space)) {
bad_values++;
} else {
Expand All @@ -136,8 +137,9 @@ class CorruptionTest : public testing::Test {
delete iter;

fprintf(stderr,
"expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%d\n",
min_expected, max_expected, correct, bad_keys, bad_values, missed);
"expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%llu\n",
min_expected, max_expected, correct, bad_keys, bad_values,
static_cast<unsigned long long>(missed));
ASSERT_LE(min_expected, correct);
ASSERT_GE(max_expected, correct);
}
Expand Down
47 changes: 24 additions & 23 deletions db/db_bench.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1194,15 +1194,15 @@ static std::unordered_map<OperationType, std::string, std::hash<unsigned char>>
class Stats {
private:
int id_;
double start_;
double finish_;
uint64_t start_;
uint64_t finish_;
double seconds_;
int64_t done_;
int64_t last_report_done_;
int64_t next_report_;
int64_t bytes_;
double last_op_finish_;
double last_report_finish_;
uint64_t done_;
uint64_t last_report_done_;
uint64_t next_report_;
uint64_t bytes_;
uint64_t last_op_finish_;
uint64_t last_report_finish_;
std::unordered_map<OperationType, HistogramImpl,
std::hash<unsigned char>> hist_;
std::string message_;
Expand Down Expand Up @@ -1304,8 +1304,8 @@ class Stats {
reporter_agent_->ReportFinishedOps(num_ops);
}
if (FLAGS_histogram) {
double now = FLAGS_env->NowMicros();
double micros = now - last_op_finish_;
uint64_t now = FLAGS_env->NowMicros();
uint64_t micros = now - last_op_finish_;

if (hist_.find(op_type) == hist_.end())
{
Expand All @@ -1315,7 +1315,7 @@ class Stats {
hist_[op_type].Add(micros);

if (micros > 20000 && !FLAGS_stats_interval) {
fprintf(stderr, "long op: %.1f micros%30s\r", micros, "");
fprintf(stderr, "long op: %" PRIu64 " micros%30s\r", micros, "");
fflush(stderr);
}
last_op_finish_ = now;
Expand All @@ -1333,7 +1333,7 @@ class Stats {
else next_report_ += 100000;
fprintf(stderr, "... finished %" PRIu64 " ops%30s\r", done_, "");
} else {
double now = FLAGS_env->NowMicros();
uint64_t now = FLAGS_env->NowMicros();
int64_t usecs_since_last = now - last_report_finish_;

// Determine whether to print status where interval is either
Expand All @@ -1349,7 +1349,7 @@ class Stats {
fprintf(stderr,
"%s ... thread %d: (%" PRIu64 ",%" PRIu64 ") ops and "
"(%.1f,%.1f) ops/second in (%.6f,%.6f) seconds\n",
FLAGS_env->TimeToString((uint64_t) now/1000000).c_str(),
FLAGS_env->TimeToString(now/1000000).c_str(),
id_,
done_ - last_report_done_, done_,
(done_ - last_report_done_) /
Expand Down Expand Up @@ -1503,7 +1503,7 @@ struct ThreadState {

class Duration {
public:
Duration(int max_seconds, int64_t max_ops, int64_t ops_per_stage = 0) {
Duration(uint64_t max_seconds, int64_t max_ops, int64_t ops_per_stage = 0) {
max_seconds_ = max_seconds;
max_ops_= max_ops;
ops_per_stage_ = (ops_per_stage > 0) ? ops_per_stage : max_ops;
Expand All @@ -1520,8 +1520,8 @@ class Duration {
if (max_seconds_) {
// Recheck every appx 1000 ops (exact iff increment is factor of 1000)
if ((ops_/1000) != ((ops_-increment)/1000)) {
double now = FLAGS_env->NowMicros();
return ((now - start_at_) / 1000000.0) >= max_seconds_;
uint64_t now = FLAGS_env->NowMicros();
return ((now - start_at_) / 1000000) >= max_seconds_;
} else {
return false;
}
Expand All @@ -1531,11 +1531,11 @@ class Duration {
}

private:
int max_seconds_;
uint64_t max_seconds_;
int64_t max_ops_;
int64_t ops_per_stage_;
int64_t ops_;
double start_at_;
uint64_t start_at_;
};

class Benchmark {
Expand Down Expand Up @@ -3214,6 +3214,7 @@ class Benchmark {
if (thread->tid > 0) {
ReadRandom(thread);
} else {
BGWriter(thread, kPut);
BGWriter(thread, kWrite);
}
}
Expand All @@ -3229,7 +3230,7 @@ class Benchmark {
void BGWriter(ThreadState* thread, enum OperationType write_merge) {
// Special thread that keeps writing until other threads are done.
RandomGenerator gen;
double last = FLAGS_env->NowMicros();
uint64_t last = FLAGS_env->NowMicros();
int writes_per_second_by_10 = 0;
int num_writes = 0;
int64_t bytes = 0;
Expand Down Expand Up @@ -3274,14 +3275,14 @@ class Benchmark {

++num_writes;
if (writes_per_second_by_10 && num_writes >= writes_per_second_by_10) {
double now = FLAGS_env->NowMicros();
double usecs_since_last = now - last;
uint64_t now = FLAGS_env->NowMicros();
uint64_t usecs_since_last = now - last;

num_writes = 0;
last = now;

if (usecs_since_last < 100000.0) {
FLAGS_env->SleepForMicroseconds(100000.0 - usecs_since_last);
if (usecs_since_last < 100000) {
FLAGS_env->SleepForMicroseconds(static_cast<int>(100000 - usecs_since_last));
last = FLAGS_env->NowMicros();
}
}
Expand Down
7 changes: 4 additions & 3 deletions db/db_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -4315,8 +4315,9 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {
if (s.ok()) {
// Our final size should be less than write_buffer_size
// (compression, etc) but err on the side of caution.
lfile->SetPreallocationBlockSize(1.1 *
mutable_cf_options.write_buffer_size);
lfile->SetPreallocationBlockSize(
mutable_cf_options.write_buffer_size / 10 +
mutable_cf_options.write_buffer_size);
unique_ptr<WritableFileWriter> file_writer(
new WritableFileWriter(std::move(lfile), opt_env_opt));
new_log = new log::Writer(std::move(file_writer), new_log_number,
Expand Down Expand Up @@ -4993,7 +4994,7 @@ Status DB::Open(const DBOptions& db_options, const std::string& dbname,
LogFileName(impl->db_options_.wal_dir, new_log_number),
&lfile, opt_env_options);
if (s.ok()) {
lfile->SetPreallocationBlockSize(1.1 * max_write_buffer_size);
lfile->SetPreallocationBlockSize((max_write_buffer_size / 10) + max_write_buffer_size);
impl->logfile_number_ = new_log_number;
unique_ptr<WritableFileWriter> file_writer(
new WritableFileWriter(std::move(lfile), opt_env_options));
Expand Down
24 changes: 13 additions & 11 deletions db/db_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@

namespace rocksdb {

static long TestGetTickerCount(const Options& options, Tickers ticker_type) {
static uint64_t TestGetTickerCount(const Options& options, Tickers ticker_type) {
return options.statistics->getTickerCount(ticker_type);
}

Expand Down Expand Up @@ -1231,8 +1231,8 @@ TEST_F(DBTest, KeyMayExist) {
ASSERT_OK(Flush(1));
value.clear();

long numopen = TestGetTickerCount(options, NO_FILE_OPENS);
long cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
uint64_t numopen = TestGetTickerCount(options, NO_FILE_OPENS);
uint64_t cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
ASSERT_TRUE(
db_->KeyMayExist(ropts, handles_[1], "a", &value, &value_found));
ASSERT_TRUE(!value_found);
Expand Down Expand Up @@ -1300,8 +1300,8 @@ TEST_F(DBTest, NonBlockingIteration) {

// verify that a non-blocking iterator does not find any
// kvs. Neither does it do any IOs to storage.
long numopen = TestGetTickerCount(options, NO_FILE_OPENS);
long cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
uint64_t numopen = TestGetTickerCount(options, NO_FILE_OPENS);
uint64_t cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
iter = db_->NewIterator(non_blocking_opts, handles_[1]);
count = 0;
for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
Expand Down Expand Up @@ -5227,15 +5227,17 @@ class RecoveryTestHelper {
test->Close();
#endif
if (trunc) {
ASSERT_EQ(0, truncate(fname.c_str(), size * off));
ASSERT_EQ(0, truncate(fname.c_str(),
static_cast<int64_t>(size * off)));
} else {
InduceCorruption(fname, size * off, size * len);
InduceCorruption(fname, static_cast<size_t>(size * off),
static_cast<size_t>(size * len));
}
}

// Overwrite data with 'a' from offset for length len
static void InduceCorruption(const std::string& filename, uint32_t offset,
uint32_t len) {
static void InduceCorruption(const std::string& filename, size_t offset,
size_t len) {
ASSERT_GT(len, 0U);

int fd = open(filename.c_str(), O_RDWR);
Expand Down Expand Up @@ -6439,7 +6441,7 @@ TEST_F(DBTest, RateLimitingTest) {
RandomString(&rnd, (1 << 10) + 1), wo));
}
uint64_t elapsed = env_->NowMicros() - start;
double raw_rate = env_->bytes_written_ * 1000000 / elapsed;
double raw_rate = env_->bytes_written_ * 1000000.0 / elapsed;
Close();

// # rate limiting with 0.7 x threshold
Expand Down Expand Up @@ -8126,7 +8128,7 @@ TEST_F(DBTest, MutexWaitStats) {
options.create_if_missing = true;
options.statistics = rocksdb::CreateDBStatistics();
CreateAndReopenWithCF({"pikachu"}, options);
const int64_t kMutexWaitDelay = 100;
const uint64_t kMutexWaitDelay = 100;
ThreadStatusUtil::TEST_SetStateDelay(
ThreadStatus::STATE_MUTEX_WAIT, kMutexWaitDelay);
ASSERT_OK(Put("hello", "rocksdb"));
Expand Down
5 changes: 4 additions & 1 deletion db/event_helpers.cc
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,10 @@
namespace rocksdb {

namespace {
inline double SafeDivide(double a, double b) { return b == 0.0 ? 0 : a / b; }
template<class T>
inline T SafeDivide(T a, T b) {
return b == 0 ? 0 : a / b;
}
} // namespace

void EventHelpers::AppendCurrentTime(JSONWriter* jwriter) {
Expand Down
3 changes: 2 additions & 1 deletion db/version_edit.h
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,8 @@ struct FileDescriptor {
return packed_number_and_path_id & kFileNumberMask;
}
uint32_t GetPathId() const {
return packed_number_and_path_id / (kFileNumberMask + 1);
return static_cast<uint32_t>(
packed_number_and_path_id / (kFileNumberMask + 1));
}
uint64_t GetFileSize() const { return file_size; }
};
Expand Down
13 changes: 8 additions & 5 deletions db/version_set.cc
Original file line number Diff line number Diff line change
Expand Up @@ -738,24 +738,27 @@ uint64_t VersionStorageInfo::GetEstimatedActiveKeys() const {
// (2) keys are directly overwritten
// (3) deletion on non-existing keys
// (4) low number of samples
if (current_num_samples_ == 0) {
if (num_samples_ == 0) {
return 0;
}

if (current_num_non_deletions_ <= current_num_deletions_) {
if (accumulated_num_non_deletions_ <= accumulated_num_deletions_) {
return 0;
}

uint64_t est = current_num_non_deletions_ - current_num_deletions_;
uint64_t est = accumulated_num_non_deletions_ - accumulated_num_deletions_;

uint64_t file_count = 0;
for (int level = 0; level < num_levels_; ++level) {
file_count += files_[level].size();
}

if (current_num_samples_ < file_count) {
if (num_samples_ < file_count) {
// casting to avoid overflowing
return (est * static_cast<double>(file_count) / current_num_samples_);
return
static_cast<uint64_t>(
(est * static_cast<double>(file_count) / current_num_samples_);
);
} else {
return est;
}
Expand Down
7 changes: 5 additions & 2 deletions memtable/hash_cuckoo_rep.cc
Original file line number Diff line number Diff line change
Expand Up @@ -624,8 +624,9 @@ MemTableRep* HashCuckooRepFactory::CreateMemTableRep(
size_t pointer_size = sizeof(std::atomic<const char*>);
assert(write_buffer_size_ >= (average_data_size_ + pointer_size));
size_t bucket_count =
static_cast<size_t>(
(write_buffer_size_ / (average_data_size_ + pointer_size)) / kFullness +
1;
1);
unsigned int hash_function_count = hash_function_count_;
if (hash_function_count < 2) {
hash_function_count = 2;
Expand All @@ -635,7 +636,9 @@ MemTableRep* HashCuckooRepFactory::CreateMemTableRep(
}
return new HashCuckooRep(compare, allocator, bucket_count,
hash_function_count,
(average_data_size_ + pointer_size) / kFullness);
static_cast<size_t>(
(average_data_size_ + pointer_size) / kFullness)
);
}

MemTableRepFactory* NewHashCuckooRepFactory(size_t write_buffer_size,
Expand Down
Loading

0 comments on commit 236fe21

Please sign in to comment.