les: fix and slim the unit tests of les (ethereum#20247)
* les: loosen restrictions in the unit tests

* les: update unit tests

* les, light: slim the unit tests
rjl493456442 authored and zsfelfoldi committed Nov 6, 2019
1 parent fc3661f commit b9bac1f
Showing 8 changed files with 61 additions and 31 deletions.
3 changes: 3 additions & 0 deletions les/clientpool.go
@@ -459,6 +459,9 @@ func (f *clientPool) addBalance(id enode.ID, amount uint64, setTotal bool) {
 	defer func() {
 		c.balanceTracker.setBalance(pb.value, negBalance)
 		if !c.priority && pb.value > 0 {
+			// The capacity should be adjusted based on the requirement,
+			// but we have no idea about the new capacity here, so a
+			// second call is needed to update it.
 			c.priority = true
 			c.balanceTracker.addCallback(balanceCallbackZero, 0, func() { f.balanceExhausted(id) })
 		}
31 changes: 24 additions & 7 deletions les/clientpool_test.go
@@ -68,6 +68,14 @@ func (i poolTestPeer) freeClientId() string {
 
 func (i poolTestPeer) updateCapacity(uint64) {}
 
+type poolTestPeerWithCap struct {
+	poolTestPeer
+
+	cap uint64
+}
+
+func (i *poolTestPeerWithCap) updateCapacity(cap uint64) { i.cap = cap }
+
 func testClientPool(t *testing.T, connLimit, clientCount, paidCount int, randomDisconnect bool) {
 	rand.Seed(time.Now().UnixNano())
 	var (
@@ -308,9 +316,9 @@ func TestFreeClientKickedOut(t *testing.T) {
 
 	for i := 0; i < 10; i++ {
 		pool.connect(poolTestPeer(i), 1)
-		clock.Run(100 * time.Millisecond)
+		clock.Run(time.Millisecond)
 	}
-	if pool.connect(poolTestPeer(11), 1) {
+	if pool.connect(poolTestPeer(10), 1) {
 		t.Fatalf("New free client should be rejected")
 	}
 	clock.Run(5 * time.Minute)
@@ -320,8 +328,8 @@ func TestFreeClientKickedOut(t *testing.T) {
 	for i := 0; i < 10; i++ {
 		select {
 		case id := <-kicked:
-			if id != i {
-				t.Fatalf("Kicked client mismatch, want %v, got %v", i, id)
+			if id >= 10 {
+				t.Fatalf("Old client should be kicked, now got: %d", id)
 			}
 		case <-time.NewTimer(time.Second).C:
 			t.Fatalf("timeout")
@@ -364,11 +372,20 @@ func TestDowngradePriorityClient(t *testing.T) {
 	pool.setLimits(10, uint64(10)) // Total capacity limit is 10
 	pool.setPriceFactors(priceFactors{1, 0, 1}, priceFactors{1, 0, 1})
 
-	pool.addBalance(poolTestPeer(0).ID(), uint64(time.Minute), false)
-	pool.connect(poolTestPeer(0), 10)
+	p := &poolTestPeerWithCap{
+		poolTestPeer: poolTestPeer(0),
+	}
+	pool.addBalance(p.ID(), uint64(time.Minute), false)
+	pool.connect(p, 10)
+	if p.cap != 10 {
+		t.Fatalf("The capacity of the priority peer hasn't been updated, got: %d", p.cap)
+	}
+
 	clock.Run(time.Minute)             // All positive balance should be used up.
 	time.Sleep(300 * time.Millisecond) // Ensure the callback is called
 
+	if p.cap != 1 {
+		t.Fatalf("The capacity of the peer should be downgraded, got: %d", p.cap)
+	}
 	pb := pool.ndb.getOrNewPB(poolTestPeer(0).ID())
 	if pb.value != 0 {
 		t.Fatalf("Positive balance mismatch, want %v, got %v", 0, pb.value)
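The poolTestPeerWithCap double introduced above simply records the last capacity value the pool pushes to it, which is what lets TestDowngradePriorityClient assert both the initial grant of 10 and the later downgrade to 1. Note the two clocks in play: clock.Run(time.Minute) exhausts the positive balance on the simulated clock, while the real time.Sleep(300 * time.Millisecond) gives the asynchronous balance callback a chance to fire (hence the in-diff comment "Ensure the callback is called") before the assertions run.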
14 changes: 8 additions & 6 deletions les/distributor.go
@@ -110,13 +110,15 @@ func (d *requestDistributor) registerTestPeer(p distPeer) {
 	d.peerLock.Unlock()
 }
 
-// distMaxWait is the maximum waiting time after which further necessary waiting
-// times are recalculated based on new feedback from the servers
-const distMaxWait = time.Millisecond * 50
+var (
+	// distMaxWait is the maximum waiting time after which further necessary waiting
+	// times are recalculated based on new feedback from the servers
+	distMaxWait = time.Millisecond * 50
 
-// waitForPeers is the time window in which a request does not fail even if it
-// has no suitable peers to send to at the moment
-const waitForPeers = time.Second * 3
+	// waitForPeers is the time window in which a request does not fail even if it
+	// has no suitable peers to send to at the moment
+	waitForPeers = time.Second * 3
+)
 
 // main event loop
 func (d *requestDistributor) loop() {
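Turning these package constants into vars is the enabler for the test changes below, where waitForPeers is zeroed out so requests fail fast when no suitable peer exists. A minimal, self-contained sketch of the pattern (demo names only; the actual les tests simply assign zero without restoring, as the following diffs show):

    package demo

    import (
        "testing"
        "time"
    )

    // A package-level var rather than a const, so tests can dial the knob down.
    var waitForPeers = 3 * time.Second

    func TestWithoutPeerWait(t *testing.T) {
        // Override the timing knob for this test and restore it afterwards,
        // so other tests still see the production default.
        old := waitForPeers
        waitForPeers = 0
        defer func() { waitForPeers = old }()

        // ... exercise code paths that honor waitForPeers ...
    }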
7 changes: 5 additions & 2 deletions les/distributor_test.go
@@ -86,8 +86,8 @@ func (p *testDistPeer) worker(t *testing.T, checkOrder bool, stop chan struct{})
 const (
 	testDistBufLimit       = 10000000
 	testDistMaxCost        = 1000000
-	testDistPeerCount      = 5
-	testDistReqCount       = 5000
+	testDistPeerCount      = 2
+	testDistReqCount       = 10
 	testDistMaxResendCount = 3
 )

@@ -128,6 +128,9 @@ func testRequestDistributor(t *testing.T, resend bool) {
 		go peers[i].worker(t, !resend, stop)
 		dist.registerTestPeer(peers[i])
 	}
+	// Disable the mechanism that makes requests wait for a while even
+	// when there is no suitable peer to send them to right now.
+	waitForPeers = 0
 
 	var wg sync.WaitGroup

7 changes: 6 additions & 1 deletion les/odr_test.go
@@ -193,6 +193,9 @@ func testOdr(t *testing.T, protocol int, expFail uint64, checkCached bool, fn od
 	if clientHead.Number.Uint64() != 4 {
 		t.Fatalf("Failed to sync the chain with server, head: %v", clientHead.Number.Uint64())
 	}
+	// Disable the mechanism that makes requests wait for a while even
+	// when there is no suitable peer to send them to right now.
+	waitForPeers = 0
 
 	test := func(expFail uint64) {
 		// Mark this as a helper to put the failures at the correct lines
@@ -202,7 +205,9 @@
 		bhash := rawdb.ReadCanonicalHash(server.db, i)
 		b1 := fn(light.NoOdr, server.db, server.handler.server.chainConfig, server.handler.blockchain, nil, bhash)
 
-		ctx, cancel := context.WithTimeout(context.Background(), 200*time.Millisecond)
+		// Set the timeout to 1 second here to ensure there is enough
+		// time for Travis to complete the request.
+		ctx, cancel := context.WithTimeout(context.Background(), time.Second)
 		b2 := fn(ctx, client.db, client.handler.backend.chainConfig, nil, client.handler.backend.blockchain, bhash)
 		cancel()

2 changes: 1 addition & 1 deletion les/sync_test.go
@@ -89,7 +89,7 @@ func testCheckpointSyncing(t *testing.T, protocol int, syncMode int) {
 	for {
 		_, hash, _, err := server.handler.server.oracle.contract.Contract().GetLatestCheckpoint(nil)
 		if err != nil || hash == [32]byte{} {
-			time.Sleep(100 * time.Millisecond)
+			time.Sleep(10 * time.Millisecond)
 			continue
 		}
 		break
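The loop above now polls the checkpoint oracle every 10ms instead of every 100ms, but it still spins forever if no checkpoint ever registers. A hedged sketch of the same poll with an explicit deadline (waitForCheckpoint and the getCheckpoint callback are hypothetical helpers, not part of this commit; assumes the standard testing and time imports):

    // waitForCheckpoint polls until getCheckpoint reports a non-zero hash and
    // fails the test after a fixed deadline instead of hanging the test binary.
    func waitForCheckpoint(t *testing.T, getCheckpoint func() ([32]byte, error)) {
        t.Helper()
        deadline := time.Now().Add(10 * time.Second)
        for {
            hash, err := getCheckpoint()
            if err == nil && hash != [32]byte{} {
                return
            }
            if time.Now().After(deadline) {
                t.Fatal("timed out waiting for checkpoint registration")
            }
            time.Sleep(10 * time.Millisecond)
        }
    }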
4 changes: 2 additions & 2 deletions les/test_helper.go
@@ -71,10 +71,10 @@
 
 var (
 	// The block frequency for creating checkpoint(only used in test)
-	sectionSize = big.NewInt(512)
+	sectionSize = big.NewInt(128)
 
 	// The number of confirmations needed to generate a checkpoint(only used in test).
-	processConfirms = big.NewInt(4)
+	processConfirms = big.NewInt(1)
 
 	// The token bucket buffer limit for testing purpose.
 	testBufLimit = uint64(1000000)
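With the slimmed values, a test chain can register its first checkpoint after roughly sectionSize + processConfirms = 128 + 1 = 129 blocks, down from 512 + 4 = 516, which is where most of the speedup in the checkpoint-related tests comes from.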
24 changes: 12 additions & 12 deletions light/postprocess.go
@@ -79,21 +79,21 @@
 	}
 	// TestServerIndexerConfig wraps a set of configs as a test indexer config for server side.
 	TestServerIndexerConfig = &IndexerConfig{
-		ChtSize:           512,
-		ChtConfirms:       4,
-		BloomSize:         64,
-		BloomConfirms:     4,
-		BloomTrieSize:     512,
-		BloomTrieConfirms: 4,
+		ChtSize:           128,
+		ChtConfirms:       1,
+		BloomSize:         16,
+		BloomConfirms:     1,
+		BloomTrieSize:     128,
+		BloomTrieConfirms: 1,
 	}
 	// TestClientIndexerConfig wraps a set of configs as a test indexer config for client side.
 	TestClientIndexerConfig = &IndexerConfig{
-		ChtSize:           512,
-		ChtConfirms:       32,
-		BloomSize:         512,
-		BloomConfirms:     32,
-		BloomTrieSize:     512,
-		BloomTrieConfirms: 32,
+		ChtSize:           128,
+		ChtConfirms:       8,
+		BloomSize:         128,
+		BloomConfirms:     8,
+		BloomTrieSize:     128,
+		BloomTrieConfirms: 8,
 	}
)
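The slimming is proportional on both sides: every 512-block section shrinks to 128 (the server-side bloom section even drops from 64 to 16), and the confirmation counts fall from 4 and 32 to 1 and 8, preserving the property that the client waits eight times as many confirmations as the server before trusting an indexed section.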
