avoid halting synchronization in permissionless nodes (0xPolygonHermez#2536) (0xPolygonHermez#2545)

* avoid halting synchronization in permissionless nodes

* logs

* logs with %d

* always print the batchL2Data
ARR552 authored Sep 14, 2023
1 parent 191ff41 commit b083a59
Showing 2 changed files with 46 additions and 26 deletions.
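
The change in a nutshell: before this commit, a state-root mismatch during trusted-state synchronization made the node halt; now only the trusted sequencer halts, while a permissionless node logs the discrepancy, returns an error, clears its cached trusted state, and re-syncs the affected batch from L1 on the next iteration. A minimal, self-contained Go sketch of that control flow follows (handleStateRootMismatch and its parameters are illustrative, not the synchronizer's API):

package main

import (
    "fmt"
    "log"
)

// handleStateRootMismatch is illustrative only; in the real code the equivalent
// decisions live in checkTrustedState and processTrustedBatch.
func handleStateRootMismatch(isTrustedSequencer bool, batchNumber uint64, calculated, received string) error {
    err := fmt.Errorf("batch: %d, stateRoot calculated (%s) is different from the stateRoot (%s) received during the trustedState synchronization",
        batchNumber, calculated, received)
    if isTrustedSequencer {
        // The trusted sequencer is the source of truth for trusted state: halting is the only safe reaction.
        log.Fatalf("TRUSTED REORG DETECTED! %v", err)
    }
    // A permissionless node just surfaces the error; the Sync loop clears its cached
    // trusted state and retries, falling back to the batch data stored on L1.
    return err
}

func main() {
    if err := handleStateRootMismatch(false, 1234, "0xaaa...", "0xbbb..."); err != nil {
        fmt.Println("trusted-state sync aborted for this iteration:", err)
    }
}

The underlying rationale, as the diff suggests, is that a permissionless node can always recover by re-syncing from L1, while the trusted sequencer has no external source of truth to fall back on, so it still halts on a detected trusted reorg.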
57 changes: 40 additions & 17 deletions synchronizer/synchronizer.go
@@ -254,6 +254,8 @@ func (s *ClientSynchronizer) Sync() error {
metrics.FullTrustedSyncTime(time.Since(startTrusted))
if err != nil {
log.Warn("error syncing trusted state. Error: ", err)
s.trustedState.lastTrustedBatches = nil
s.trustedState.lastStateRoot = nil
continue
}
waitDuration = s.cfg.SyncInterval.Duration
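
For context on the two nil assignments added above: lastTrustedBatches and lastStateRoot act as an in-memory cache of the most recently synced trusted batch. Clearing them when the trusted-state sync fails keeps the next iteration from reasoning against stale cached data. A sketch of the pattern, assuming the field names shown in the diff (the struct shape and the reset helper are not part of this hunk):

package synchronizer

import (
    "github.com/0xPolygonHermez/zkevm-node/state"
    "github.com/ethereum/go-ethereum/common"
)

// trustedState mirrors the in-memory cache the synchronizer keeps between iterations;
// the field names come from the diff, the enclosing shape is assumed here.
type trustedState struct {
    lastTrustedBatches []*state.Batch
    lastStateRoot      *common.Hash
}

// reset drops the cache so the next iteration reloads the batch from the database (or,
// after a mismatch, from L1) instead of trusting possibly inconsistent in-memory data.
// The real Sync loop performs these two assignments inline, as shown above.
func (t *trustedState) reset() {
    t.lastTrustedBatches = nil
    t.lastStateRoot = nil
}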
@@ -681,7 +683,12 @@ func (s *ClientSynchronizer) checkTrustedState(batch state.Batch, tBatch *state.

if reorgReasons.Len() > 0 {
reason := reorgReasons.String()
log.Warnf("Missmatch in trusted state detected for Batch Number: %d. Reasons: %s", tBatch.BatchNumber, reason)

if tBatch.StateRoot == (common.Hash{}) {
log.Warnf("incomplete trusted batch %d detected. Syncing full batch from L1", tBatch.BatchNumber)
} else {
log.Warnf("missmatch in trusted state detected for Batch Number: %d. Reasons: %s", tBatch.BatchNumber, reason)
}
if s.isTrustedSequencer {
s.halt(s.ctx, fmt.Errorf("TRUSTED REORG DETECTED! Batch: %d", batch.BatchNumber))
}
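
The zero-value check above is what downgrades the warning: a stored trusted batch whose StateRoot is still the empty hash was only partially synchronized (opened but never closed with a processing receipt), so a discrepancy against L1 is expected rather than evidence of a reorg. A small, self-contained sketch of the distinction (the helper name is illustrative):

package main

import (
    "fmt"

    "github.com/ethereum/go-ethereum/common"
)

// classifyTrustedMismatch is illustrative; checkTrustedState applies the same test before
// deciding which warning to log and whether the batch can simply be re-synced from L1.
func classifyTrustedMismatch(storedStateRoot common.Hash, batchNumber uint64, reasons string) string {
    if storedStateRoot == (common.Hash{}) {
        return fmt.Sprintf("incomplete trusted batch %d detected. Syncing full batch from L1", batchNumber)
    }
    return fmt.Sprintf("mismatch in trusted state detected for batch %d. Reasons: %s", batchNumber, reasons)
}

func main() {
    fmt.Println(classifyTrustedMismatch(common.Hash{}, 42, ""))
    fmt.Println(classifyTrustedMismatch(common.HexToHash("0x01"), 42, "different state root"))
}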
@@ -927,7 +934,11 @@ func (s *ClientSynchronizer) processSequenceBatches(sequencedBatches []etherman.

// Reset trusted state
previousBatchNumber := batch.BatchNumber - 1
log.Warnf("Missmatch in trusted state detected, discarding batches until batchNum %d", previousBatchNumber)
if tBatch.StateRoot == (common.Hash{}) {
log.Warnf("cleaning state before inserting batch from L1. Clean until batch: %d", previousBatchNumber)
} else {
log.Warnf("mismatch in trusted state detected, discarding batches until batchNum %d", previousBatchNumber)
}
err = s.state.ResetTrustedState(s.ctx, previousBatchNumber, dbTx) // This method has to reset the forced batches deleting the batchNumber for higher batchNumbers
if err != nil {
log.Errorf("error resetting trusted state. BatchNumber: %d, BlockNumber: %d, error: %v", batch.BatchNumber, blockNumber, err)
@@ -1221,7 +1232,7 @@ func (s *ClientSynchronizer) processTrustedVerifyBatches(lastVerifiedBatch ether
}

func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx pgx.Tx) ([]*state.Batch, *common.Hash, error) {
log.Debugf("Processing trusted batch: %v", trustedBatch.Number)
log.Debugf("Processing trusted batch: %d", uint64(trustedBatch.Number))
trustedBatchL2Data := trustedBatch.BatchL2Data
batches := s.trustedState.lastTrustedBatches
log.Debug("len(batches): ", len(batches))
@@ -1263,10 +1274,10 @@ func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx
// check if batch needs to be synchronized
if batches[0] != nil {
if checkIfSynced(batches, trustedBatch) {
log.Debugf("Batch %v already synchronized", trustedBatch.Number)
log.Debugf("Batch %d already synchronized", uint64(trustedBatch.Number))
return batches, s.trustedState.lastStateRoot, nil
}
log.Infof("Batch %v needs to be updated", trustedBatch.Number)
log.Infof("Batch %d needs to be updated", uint64(trustedBatch.Number))

// Find txs to be processed and included in the trusted state
if *s.trustedState.lastStateRoot == batches[1].StateRoot {
@@ -1311,7 +1322,15 @@ func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx
if isBatchClosed {
//Sanity check
if s.trustedState.lastStateRoot != nil && trustedBatch.StateRoot != *s.trustedState.lastStateRoot {
s.halt(s.ctx, fmt.Errorf("stateRoot calculated (%s) is different from the stateRoot (%s) received during the trustedState synchronization", *s.trustedState.lastStateRoot, trustedBatch.StateRoot))
log.Errorf("batch %d, different batchL2Datas (trustedBatchL2Data: %s, batches[0].BatchL2Data: %s). Decoded txs are len(storedTxs): %d, len(syncedTxs): %d", uint64(trustedBatch.Number), trustedBatchL2Data.Hex(), "0x"+common.Bytes2Hex(batches[0].BatchL2Data), len(storedTxs), len(syncedTxs))
for _, tx := range storedTxs {
log.Error("stored txHash : ", tx.Hash())
}
for _, tx := range syncedTxs {
log.Error("synced txHash : ", tx.Hash())
}
log.Errorf("batch: %d, stateRoot calculated (%s) is different from the stateRoot (%s) received during the trustedState synchronization", uint64(trustedBatch.Number), *s.trustedState.lastStateRoot, trustedBatch.StateRoot)
return nil, nil, fmt.Errorf("batch: %d, stateRoot calculated (%s) is different from the stateRoot (%s) received during the trustedState synchronization", uint64(trustedBatch.Number), *s.trustedState.lastStateRoot, trustedBatch.StateRoot)
}
receipt := state.ProcessingReceipt{
BatchNumber: uint64(trustedBatch.Number),
@@ -1320,14 +1339,14 @@
BatchL2Data: trustedBatchL2Data,
AccInputHash: trustedBatch.AccInputHash,
}
log.Debugf("closing batch %v", trustedBatch.Number)
log.Debugf("closing batch %d", uint64(trustedBatch.Number))
if err := s.state.CloseBatch(s.ctx, receipt, dbTx); err != nil {
// This is a workaround to avoid closing a batch that was already closed
if err.Error() != state.ErrBatchAlreadyClosed.Error() {
log.Errorf("error closing batch %d", trustedBatch.Number)
log.Errorf("error closing batch %d", uint64(trustedBatch.Number))
return nil, nil, err
} else {
log.Warnf("CASE 02: the batch [%d] was already closed", trustedBatch.Number)
log.Warnf("CASE 02: the batch [%d] was already closed", uint64(trustedBatch.Number))
log.Info("batches[0].BatchNumber: ", batches[0].BatchNumber)
log.Info("batches[0].AccInputHash: ", batches[0].AccInputHash)
log.Info("batches[0].StateRoot: ", batches[0].StateRoot)
@@ -1363,13 +1382,13 @@ func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx
// Update batchL2Data
err := s.state.UpdateBatchL2Data(s.ctx, batches[0].BatchNumber, trustedBatchL2Data, dbTx)
if err != nil {
log.Errorf("error opening batch %d", trustedBatch.Number)
log.Errorf("error opening batch %d", uint64(trustedBatch.Number))
return nil, nil, err
}
batches[0].BatchL2Data = trustedBatchL2Data
log.Debug("BatchL2Data updated for batch: ", batches[0].BatchNumber)
} else {
log.Infof("Batch %v needs to be synchronized", trustedBatch.Number)
log.Infof("Batch %d needs to be synchronized", uint64(trustedBatch.Number))
err := s.openBatch(trustedBatch, dbTx)
if err != nil {
log.Error("error openning batch. Error: ", err)
@@ -1379,7 +1398,7 @@ func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx
request.Transactions = trustedBatchL2Data
}

log.Debugf("Processing sequencer for batch %v", trustedBatch.Number)
log.Debugf("Processing sequencer for batch %d", uint64(trustedBatch.Number))

processBatchResp, err := s.processAndStoreTxs(trustedBatch, request, dbTx)
if err != nil {
@@ -1392,7 +1411,10 @@ func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx
if isBatchClosed {
//Sanity check
if trustedBatch.StateRoot != processBatchResp.NewStateRoot {
s.halt(s.ctx, fmt.Errorf("stateRoot calculated (%s) is different from the stateRoot (%s) received during the trustedState synchronization", processBatchResp.NewStateRoot, trustedBatch.StateRoot))
log.Error("trustedBatchL2Data: ", trustedBatchL2Data)
log.Error("request.Transactions: ", request.Transactions)
log.Errorf("batch: %d after processing some txs, stateRoot calculated (%s) is different from the stateRoot (%s) received during the trustedState synchronization", uint64(trustedBatch.Number), processBatchResp.NewStateRoot.String(), trustedBatch.StateRoot.String())
return nil, nil, fmt.Errorf("batch: %d, stateRoot calculated (%s) is different from the stateRoot (%s) received during the trustedState synchronization", uint64(trustedBatch.Number), processBatchResp.NewStateRoot.String(), trustedBatch.StateRoot.String())
}
receipt := state.ProcessingReceipt{
BatchNumber: uint64(trustedBatch.Number),
@@ -1402,14 +1424,14 @@
AccInputHash: trustedBatch.AccInputHash,
}

log.Debugf("closing batch %v", trustedBatch.Number)
log.Debugf("closing batch %d", uint64(trustedBatch.Number))
if err := s.state.CloseBatch(s.ctx, receipt, dbTx); err != nil {
// This is a workarround to avoid closing a batch that was already closed
if err.Error() != state.ErrBatchAlreadyClosed.Error() {
log.Errorf("error closing batch %d", trustedBatch.Number)
log.Errorf("error closing batch %d", uint64(trustedBatch.Number))
return nil, nil, err
} else {
log.Warnf("CASE 01: batch [%d] was already closed", trustedBatch.Number)
log.Warnf("CASE 01: batch [%d] was already closed", uint64(trustedBatch.Number))
}
}
log.Info("Batch closed right after processing some tx")
@@ -1418,10 +1440,11 @@ func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx
batches[0].AccInputHash = trustedBatch.AccInputHash
batches[0].StateRoot = trustedBatch.StateRoot
batches[0].LocalExitRoot = trustedBatch.LocalExitRoot
batches[0].BatchL2Data = trustedBatchL2Data
}
}

log.Infof("Batch %v synchronized", trustedBatch.Number)
log.Infof("Batch %d synchronized", uint64(trustedBatch.Number))
return batches, &processBatchResp.NewStateRoot, nil
}

15 changes: 6 additions & 9 deletions synchronizer/synchronizer_test.go
@@ -35,17 +35,14 @@ type mocks struct {
//EventLog *eventLogMock
}

//func Test_Given_StartingSynchronizer_When_CallFirstTimeExecutor_Then_StoreProverID(t *testing.T) {
//}

// Feature #2220 and #2239: Optimize Trusted state synchronization
//
// this Check partially point 2: Use previous batch stored in memory to avoid getting from database
func Test_Given_PermissionlessNode_When_SyncronizeAgainSameBatch_Then_UseTheOneInMemoryInstaeadOfGettingFromDb(t *testing.T) {
func TestGivenPermissionlessNodeWhenSyncronizeAgainSameBatchThenUseTheOneInMemoryInstaeadOfGettingFromDb(t *testing.T) {
genesis, cfg, m := setupGenericTest(t)
sync_interface, err := NewSynchronizer(false, m.Etherman, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, *genesis, *cfg)
syncInterface, err := NewSynchronizer(false, m.Etherman, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, *genesis, *cfg)
require.NoError(t, err)
sync, ok := sync_interface.(*ClientSynchronizer)
sync, ok := syncInterface.(*ClientSynchronizer)
require.EqualValues(t, true, ok, "Can't convert to underlaying struct the interface of syncronizer")
lastBatchNumber := uint64(10)
batch10With1Tx := createBatch(t, lastBatchNumber, 1)
@@ -64,11 +61,11 @@ func Test_Given_PermissionlessNode_When_SyncronizeAgainSameBatch_Then_UseTheOneI
// Feature #2220 and #2239: Optimize Trusted state synchronization
//
// this Check partially point 2: Store last batch in memory (CurrentTrustedBatch)
func Test_Given_PermissionlessNode_When_SyncronizeFirstTimeABatch_Then_StoreItInALocalVar(t *testing.T) {
func TestGivenPermissionlessNodeWhenSyncronizeFirstTimeABatchThenStoreItInALocalVar(t *testing.T) {
genesis, cfg, m := setupGenericTest(t)
sync_interface, err := NewSynchronizer(false, m.Etherman, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, *genesis, *cfg)
syncInterface, err := NewSynchronizer(false, m.Etherman, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, *genesis, *cfg)
require.NoError(t, err)
sync, ok := sync_interface.(*ClientSynchronizer)
sync, ok := syncInterface.(*ClientSynchronizer)
require.EqualValues(t, true, ok, "Can't convert to underlaying struct the interface of syncronizer")
lastBatchNumber := uint64(10)
batch10With1Tx := createBatch(t, lastBatchNumber, 1)
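
The two renamed tests above exercise the optimization described in their comments (features #2220 and #2239): keep the last trusted batch in memory and reuse it when the same batch number is synchronized again, instead of reading it back from the database. A self-contained sketch of that idea with illustrative names (batchCache and loadBatch are not the synchronizer's API):

package main

import "fmt"

type batch struct {
    Number uint64
}

// batchCache stands in for the synchronizer's trustedState cache plus the state database.
type batchCache struct {
    last *batch            // stands in for trustedState.lastTrustedBatches
    hits int               // counts how often the database round-trip was avoided
    db   map[uint64]*batch // stands in for the state database
}

func (c *batchCache) loadBatch(number uint64) *batch {
    if c.last != nil && c.last.Number == number {
        c.hits++ // cache hit: no database round-trip
        return c.last
    }
    b := c.db[number] // cache miss: read from the database and remember it
    c.last = b
    return b
}

func main() {
    c := &batchCache{db: map[uint64]*batch{10: {Number: 10}}}
    c.loadBatch(10)
    c.loadBatch(10) // second sync of the same batch uses the in-memory copy
    fmt.Println("cache hits:", c.hits)
}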
