From 0ea65d4020dd1fd2b19cbad5c90ea1bb0dd9ddc4 Mon Sep 17 00:00:00 2001
From: rjl493456442
Date: Fri, 10 Feb 2023 17:35:00 +0800
Subject: [PATCH] ethdb: add benchmark test suite (#26659)

---
 ethdb/dbtest/testsuite.go     | 118 ++++++++++++++++++++++++++++++++++
 ethdb/leveldb/leveldb_test.go |  12 ++++
 ethdb/pebble/pebble.go        |   4 +-
 ethdb/pebble/pebble_test.go   |  14 ++++
 4 files changed, 147 insertions(+), 1 deletion(-)

diff --git a/ethdb/dbtest/testsuite.go b/ethdb/dbtest/testsuite.go
index 6b206af48d5e..06a2d330db55 100644
--- a/ethdb/dbtest/testsuite.go
+++ b/ethdb/dbtest/testsuite.go
@@ -18,6 +18,7 @@ package dbtest
 
 import (
 	"bytes"
+	"math/rand"
 	"reflect"
 	"sort"
 	"testing"
@@ -377,6 +378,101 @@ func TestDatabaseSuite(t *testing.T, New func() ethdb.KeyValueStore) {
 	})
 }
 
+// BenchDatabaseSuite runs a suite of benchmarks against a KeyValueStore database
+// implementation.
+func BenchDatabaseSuite(b *testing.B, New func() ethdb.KeyValueStore) {
+	var (
+		keys, vals   = makeDataset(1_000_000, 32, 32, false)
+		sKeys, sVals = makeDataset(1_000_000, 32, 32, true)
+	)
+	// Run benchmarks sequentially
+	b.Run("Write", func(b *testing.B) {
+		benchWrite := func(b *testing.B, keys, vals [][]byte) {
+			b.ResetTimer()
+			b.ReportAllocs()
+
+			db := New()
+			defer db.Close()
+
+			for i := 0; i < len(keys); i++ {
+				db.Put(keys[i], vals[i])
+			}
+		}
+		b.Run("WriteSorted", func(b *testing.B) {
+			benchWrite(b, sKeys, sVals)
+		})
+		b.Run("WriteRandom", func(b *testing.B) {
+			benchWrite(b, keys, vals)
+		})
+	})
+	b.Run("Read", func(b *testing.B) {
+		benchRead := func(b *testing.B, keys, vals [][]byte) {
+			db := New()
+			defer db.Close()
+
+			for i := 0; i < len(keys); i++ {
+				db.Put(keys[i], vals[i])
+			}
+			b.ResetTimer()
+			b.ReportAllocs()
+
+			for i := 0; i < len(keys); i++ {
+				db.Get(keys[i])
+			}
+		}
+		b.Run("ReadSorted", func(b *testing.B) {
+			benchRead(b, sKeys, sVals)
+		})
+		b.Run("ReadRandom", func(b *testing.B) {
+			benchRead(b, keys, vals)
+		})
+	})
+	b.Run("Iteration", func(b *testing.B) {
+		benchIteration := func(b *testing.B, keys, vals [][]byte) {
+			db := New()
+			defer db.Close()
+
+			for i := 0; i < len(keys); i++ {
+				db.Put(keys[i], vals[i])
+			}
+			b.ResetTimer()
+			b.ReportAllocs()
+
+			it := db.NewIterator(nil, nil)
+			for it.Next() {
+			}
+			it.Release()
+		}
+		b.Run("IterationSorted", func(b *testing.B) {
+			benchIteration(b, sKeys, sVals)
+		})
+		b.Run("IterationRandom", func(b *testing.B) {
+			benchIteration(b, keys, vals)
+		})
+	})
+	b.Run("BatchWrite", func(b *testing.B) {
+		benchBatchWrite := func(b *testing.B, keys, vals [][]byte) {
+			b.ResetTimer()
+			b.ReportAllocs()
+
+			db := New()
+			defer db.Close()
+
+			batch := db.NewBatch()
+			for i := 0; i < len(keys); i++ {
+				batch.Put(keys[i], vals[i])
+			}
+			batch.Write()
+		}
+		b.Run("BenchWriteSorted", func(b *testing.B) {
+			benchBatchWrite(b, sKeys, sVals)
+		})
+		b.Run("BenchWriteRandom", func(b *testing.B) {
+			benchBatchWrite(b, keys, vals)
+		})
+	})
+}
+
 func iterateKeys(it ethdb.Iterator) []string {
 	keys := []string{}
 	for it.Next() {
@@ -386,3 +482,25 @@
 	it.Release()
 	return keys
 }
+
+// randBytes generates a random blob of data with the given length.
+func randBytes(len int) []byte {
+	buf := make([]byte, len)
+	if n, err := rand.Read(buf); n != len || err != nil {
+		panic(err)
+	}
+	return buf
+}
+
+func makeDataset(size, ksize, vsize int, order bool) ([][]byte, [][]byte) {
+	var keys [][]byte
+	var vals [][]byte
+	for i := 0; i < size; i += 1 {
+		keys = append(keys, randBytes(ksize))
+		vals = append(vals, randBytes(vsize))
+	}
+	if order {
+		sort.Slice(keys, func(i, j int) bool { return bytes.Compare(keys[i], keys[j]) < 0 })
+	}
+	return keys, vals
+}
diff --git a/ethdb/leveldb/leveldb_test.go b/ethdb/leveldb/leveldb_test.go
index 421d9b4693f4..d8c63860161f 100644
--- a/ethdb/leveldb/leveldb_test.go
+++ b/ethdb/leveldb/leveldb_test.go
@@ -38,3 +38,15 @@ func TestLevelDB(t *testing.T) {
 		})
 	})
 }
+
+func BenchmarkLevelDB(b *testing.B) {
+	dbtest.BenchDatabaseSuite(b, func() ethdb.KeyValueStore {
+		db, err := leveldb.Open(storage.NewMemStorage(), nil)
+		if err != nil {
+			b.Fatal(err)
+		}
+		return &Database{
+			db: db,
+		}
+	})
+}
diff --git a/ethdb/pebble/pebble.go b/ethdb/pebble/pebble.go
index 5ba7e03c25dd..fa27efd96347 100644
--- a/ethdb/pebble/pebble.go
+++ b/ethdb/pebble/pebble.go
@@ -272,7 +272,9 @@ func (d *Database) NewBatch() ethdb.Batch {
 	}
 }
 // NewBatchWithSize creates a write-only database batch with pre-allocated buffer.
-// TODO can't do this with pebble. Batches are allocated in a pool so maybe this doesn't matter?
+// It's not supported by pebble, but pebble has a better memory allocation strategy
+// which turns out to be a lot faster than leveldb's. It is fast enough to construct
+// a batch object without any pre-allocated space.
 func (d *Database) NewBatchWithSize(_ int) ethdb.Batch {
 	return &batch{
 		b: d.db.NewBatch(),
diff --git a/ethdb/pebble/pebble_test.go b/ethdb/pebble/pebble_test.go
index 18b800e8aba9..c773967dc66f 100644
--- a/ethdb/pebble/pebble_test.go
+++ b/ethdb/pebble/pebble_test.go
@@ -42,3 +42,17 @@ func TestPebbleDB(t *testing.T) {
 		})
 	})
 }
+
+func BenchmarkPebbleDB(b *testing.B) {
+	dbtest.BenchDatabaseSuite(b, func() ethdb.KeyValueStore {
+		db, err := pebble.Open("", &pebble.Options{
+			FS: vfs.NewMem(),
+		})
+		if err != nil {
+			b.Fatal(err)
+		}
+		return &Database{
+			db: db,
+		}
+	})
+}
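
Usage note: any ethdb.KeyValueStore backend can hook into the new suite the same way the leveldb and pebble benchmarks above do, by handing BenchDatabaseSuite a constructor that returns a fresh, empty store for each sub-benchmark. A minimal sketch, assuming the existing ethdb/memorydb backend; the BenchmarkMemoryDB wrapper below is illustrative only and not part of this patch:

package memorydb

import (
	"testing"

	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/dbtest"
)

// BenchmarkMemoryDB is a hypothetical hook (not part of this patch) showing how a
// backend plugs into the suite: the constructor must return an empty store, since
// the write benchmarks populate it themselves.
func BenchmarkMemoryDB(b *testing.B) {
	dbtest.BenchDatabaseSuite(b, func() ethdb.KeyValueStore {
		return New() // memorydb.New() creates an empty in-memory key-value store
	})
}

The suite loops over the generated dataset rather than b.N, so each iteration performs a full one-million-key pass; running it with the standard tooling and a single iteration, e.g. go test ./ethdb/memorydb -run '^$' -bench BenchmarkMemoryDB -benchtime 1x, keeps the runtime bounded.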