Improve benchmark scripts
Summary:
This adds:
1) use of --level_compaction_dynamic_level_bytes=true
2) use of --bytes_per_sync=2M
The second is a big win for disks. The first helps in general.
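
For reference, a minimal db_bench invocation that uses both new options might look like the sketch below; the db path and workload sizes are placeholders, not part of this change:

    # Illustrative db_bench run with the two options added here;
    # the --db, --num, and --value_size values are arbitrary examples.
    ./db_bench \
      --benchmarks=overwrite \
      --db=/data/rocksdb \
      --num=1000000 \
      --value_size=400 \
      --level_compaction_dynamic_level_bytes=true \
      --bytes_per_sync=$((2 * 1024 * 1024))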

This also adds a new test: fillseq with 32KB values, to increase the peak
ingest rate and make it more likely that storage limits throughput.
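
Expressed as a standalone command, the new test is roughly the sketch below; the literal numbers mirror the script defaults (1MB block size, 32KB values, the 1G key default divided by 64), and the directories are placeholders:

    # Hypothetical standalone form of the new large-value fillseq pass.
    env DB_DIR=/data/rocksdb WAL_DIR=/txlogs/rocksdb \
        BLOCK_SIZE=$((1024 * 1024)) \
        VALUE_SIZE=$((32 * 1024)) \
        NUM_KEYS=$((16 * 1024 * 1024)) \
        ./tools/benchmark.sh fillseq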

Sample output from the first 3 tests: https://gist.github.com/mdcallag/e793bd3038e367b05d6f

Task ID: #

Blame Rev:

Test Plan:
Revert Plan:

Database Impact:

Memcache Impact:

Other Notes:

Important:

- begin *PUBLIC* platform impact section -
Bugzilla: #
- end platform impact -

Reviewers: igor

Reviewed By: igor

Subscribers: dhruba

Differential Revision: https://reviews.facebook.net/D37509
mdcallag committed Apr 22, 2015
1 parent 6a5ffee commit 78dbd08
Showing 2 changed files with 16 additions and 5 deletions.
11 changes: 7 additions & 4 deletions in tools/benchmark.sh
@@ -46,6 +46,7 @@ duration=${DURATION:-0}
num_keys=${NUM_KEYS:-$((1 * G))}
key_size=20
value_size=${VALUE_SIZE:-400}
+ block_size=${BLOCK_SIZE:-4096}

const_params="
--db=$DB_DIR \
@@ -56,12 +57,14 @@ const_params="
--num_levels=6 \
--key_size=$key_size \
--value_size=$value_size \
- --block_size=4096 \
+ --block_size=$block_size \
--cache_size=$cache_size \
--cache_numshardbits=6 \
--compression_type=zlib \
--min_level_to_compress=3 \
--compression_ratio=0.5 \
+ --level_compaction_dynamic_level_bytes=true \
+ --bytes_per_sync=$((2 * M)) \
\
--hard_rate_limit=3 \
--rate_limit_delay_max_milliseconds=1000000 \
@@ -163,10 +166,10 @@ function run_fillseq {
--threads=1 \
--memtablerep=vector \
--disable_wal=1 \
- 2>&1 | tee -a $output_dir/benchmark_fillseq.log"
- echo $cmd | tee $output_dir/benchmark_fillseq.log
+ 2>&1 | tee -a $output_dir/benchmark_fillseq.v${value_size}.log"
+ echo $cmd | tee $output_dir/benchmark_fillseq.v${value_size}.log
eval $cmd
- summarize_result $output_dir/benchmark_fillseq.log fillseq fillseq
+ summarize_result $output_dir/benchmark_fillseq.v${value_size}.log fillseq.v${value_size} fillseq
}

function run_change {
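
With this change each fillseq pass logs to a file named for its value size, so the two sequential-fill runs no longer overwrite each other's results. A short usage sketch, assuming the DB_DIR/WAL_DIR environment that benchmark.sh needs is already exported:

    # Log file names now carry the value size used for the run.
    VALUE_SIZE=400   ./tools/benchmark.sh fillseq   # -> benchmark_fillseq.v400.log
    VALUE_SIZE=32768 ./tools/benchmark.sh fillseq   # -> benchmark_fillseq.v32768.log
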
10 changes: 9 additions & 1 deletion in tools/run_flash_bench.sh
@@ -37,6 +37,7 @@
# You can estimate the size of the test database from this,
# NKEYS and the compression rate (--compression_ratio) set
# in tools/benchmark.sh
+ # BLOCK_LENGTH - value for db_bench --block_size
# CACHE_BYTES - the size of the RocksDB block cache in bytes
# DATA_DIR - directory in which to create database files
# LOG_DIR - directory in which to create WAL files, may be the same
@@ -61,6 +62,7 @@ duration=${NSECONDS:-$((60 * 60))}
nps=${RANGE_LIMIT:-10}
vs=${VAL_SIZE:-400}
cs=${CACHE_BYTES:-$(( 1 * G ))}
+ bs=${BLOCK_LENGTH:-4096}

# If no command line arguments then run for 24 threads.
if [[ $# -eq 0 ]]; then
@@ -88,6 +90,7 @@ NUM_KEYS=$num_keys \
DB_DIR=$db_dir \
WAL_DIR=$wal_dir \
VALUE_SIZE=$vs \
+ BLOCK_SIZE=$bs \
CACHE_SIZE=$cs"

mkdir -p $output_dir
@@ -109,7 +112,12 @@ if [[ $do_setup != 0 ]]; then
# Test 1: bulk load
env $ARGS ./tools/benchmark.sh bulkload

- # Test 2: sequential fill
+ # Test 2a: sequential fill with large values to get peak ingest
+ # adjust NUM_KEYS given the use of larger values
+ env $ARGS BLOCK_SIZE=$((1 * M)) VALUE_SIZE=$((32 * K)) NUM_KEYS=$(( num_keys / 64 )) \
+ ./tools/benchmark.sh fillseq
+
+ # Test 2b: sequential fill with the configured value size
env $ARGS ./tools/benchmark.sh fillseq

# Test 3: single-threaded overwrite
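
A hypothetical end-to-end run exercising the new BLOCK_LENGTH knob together with the environment settings documented at the top of the script; the paths and sizes are illustrative:

    # No positional arguments, so the script uses its default 24-thread run.
    env NKEYS=$((1024 * 1024 * 1024)) \
        BLOCK_LENGTH=16384 \
        CACHE_BYTES=$((32 * 1024 * 1024 * 1024)) \
        DATA_DIR=/data/rocksdb \
        LOG_DIR=/txlogs/rocksdb \
        ./tools/run_flash_bench.sh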
