forked from 0xPolygonHermez/zkevm-node
finalizer.go
package sequencer
import (
"context"
"errors"
"fmt"
"math/big"
"sync"
"sync/atomic"
"time"
"github.com/0xPolygonHermez/zkevm-data-streamer/datastreamer"
"github.com/0xPolygonHermez/zkevm-node/event"
"github.com/0xPolygonHermez/zkevm-node/hex"
"github.com/0xPolygonHermez/zkevm-node/log"
"github.com/0xPolygonHermez/zkevm-node/pool"
"github.com/0xPolygonHermez/zkevm-node/state"
stateMetrics "github.com/0xPolygonHermez/zkevm-node/state/metrics"
"github.com/0xPolygonHermez/zkevm-node/state/runtime"
"github.com/0xPolygonHermez/zkevm-node/state/runtime/executor"
"github.com/ethereum/go-ethereum/common"
)
const (
pendingL2BlocksBufferSize = 100
changeL2BlockSize = 9 // 1 byte (tx type = 0B) + 4 bytes for deltaTimestamp + 4 bytes for l1InfoTreeIndex
)
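// Illustrative layout (not part of the original source) of the 9 bytes counted by changeL2BlockSize, following the comment above:
//   byte  0     tx type marker for changeL2Block (0B)
//   bytes 1-4   deltaTimestamp
//   bytes 5-8   l1InfoTreeIndex
// Presumably this is the amount reserved from the batch byte budget each time a new L2 block is opened.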
var (
now = time.Now
)
// finalizer represents the finalizer component of the sequencer.
type finalizer struct {
cfg FinalizerCfg
isSynced func(ctx context.Context) bool
sequencerAddress common.Address
workerIntf workerInterface
poolIntf txPool
stateIntf stateInterface
etherman etherman
wipBatch *Batch
wipL2Block *L2Block
batchConstraints state.BatchConstraintsCfg
haltFinalizer atomic.Bool
// forced batches
nextForcedBatches []state.ForcedBatch
nextForcedBatchDeadline int64
nextForcedBatchesMux *sync.Mutex
lastForcedBatchNum uint64
// L1InfoTree
lastL1InfoTreeValid bool
lastL1InfoTree state.L1InfoTreeExitRootStorageEntry
lastL1InfoTreeMux *sync.Mutex
lastL1InfoTreeCond *sync.Cond
// event log
eventLog *event.EventLog
// effective gas price calculation instance
effectiveGasPrice *pool.EffectiveGasPrice
// pending L2 blocks to process (executor)
pendingL2BlocksToProcess chan *L2Block
pendingL2BlocksToProcessWG *sync.WaitGroup
// pending L2 blocks to store in the state
pendingL2BlocksToStore chan *L2Block
pendingL2BlocksToStoreWG *sync.WaitGroup
// L2 block counter for tracking purposes
l2BlockCounter uint64
// executor flushid control
proverID string
storedFlushID uint64
storedFlushIDCond *sync.Cond //Condition to wait until storedFlushID has been updated
lastPendingFlushID uint64
pendingFlushIDCond *sync.Cond
// worker ready txs condition
workerReadyTxsCond *timeoutCond
// interval metrics
metrics *intervalMetrics
// stream server
streamServer *datastreamer.StreamServer
dataToStream chan interface{}
}
// newFinalizer returns a new instance of Finalizer.
func newFinalizer(
cfg FinalizerCfg,
poolCfg pool.Config,
workerIntf workerInterface,
poolIntf txPool,
stateIntf stateInterface,
etherman etherman,
sequencerAddr common.Address,
isSynced func(ctx context.Context) bool,
batchConstraints state.BatchConstraintsCfg,
eventLog *event.EventLog,
streamServer *datastreamer.StreamServer,
workerReadyTxsCond *timeoutCond,
dataToStream chan interface{},
) *finalizer {
f := finalizer{
cfg: cfg,
isSynced: isSynced,
sequencerAddress: sequencerAddr,
workerIntf: workerIntf,
poolIntf: poolIntf,
stateIntf: stateIntf,
etherman: etherman,
batchConstraints: batchConstraints,
// forced batches
nextForcedBatches: make([]state.ForcedBatch, 0),
nextForcedBatchDeadline: 0,
nextForcedBatchesMux: new(sync.Mutex),
// L1InfoTree
lastL1InfoTreeValid: false,
lastL1InfoTreeMux: new(sync.Mutex),
lastL1InfoTreeCond: sync.NewCond(&sync.Mutex{}),
// event log
eventLog: eventLog,
// effective gas price calculation instance
effectiveGasPrice: pool.NewEffectiveGasPrice(poolCfg.EffectiveGasPrice),
// pending L2 blocks to process (executor)
pendingL2BlocksToProcess: make(chan *L2Block, pendingL2BlocksBufferSize),
pendingL2BlocksToProcessWG: new(sync.WaitGroup),
// pending L2 blocks to store in the state
pendingL2BlocksToStore: make(chan *L2Block, pendingL2BlocksBufferSize),
pendingL2BlocksToStoreWG: new(sync.WaitGroup),
// executor flushid control
proverID: "",
storedFlushID: 0,
storedFlushIDCond: sync.NewCond(&sync.Mutex{}),
lastPendingFlushID: 0,
pendingFlushIDCond: sync.NewCond(&sync.Mutex{}),
// worker ready txs condition
workerReadyTxsCond: workerReadyTxsCond,
// metrics
metrics: newIntervalMetrics(cfg.Metrics.Interval.Duration),
// stream server
streamServer: streamServer,
dataToStream: dataToStream,
}
f.haltFinalizer.Store(false)
return &f
}
// Start starts the finalizer.
func (f *finalizer) Start(ctx context.Context) {
// Do sanity check for batches closed but pending to be checked
f.processBatchesPendingtoCheck(ctx)
// Update L1InfoRoot
go f.checkL1InfoTreeUpdate(ctx)
// Get the last batch if it's still WIP or open a new one
f.initWIPBatch(ctx)
// Initializes the wip L2 block
f.initWIPL2Block(ctx)
// Update the prover id and flush id
go f.updateProverIdAndFlushId(ctx)
// Process L2 Blocks
go f.processPendingL2Blocks(ctx)
// Store L2 Blocks
go f.storePendingL2Blocks(ctx)
// Forced batches checking
go f.checkForcedBatches(ctx)
// Processing transactions and finalizing batches
f.finalizeBatches(ctx)
}
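// Note on the goroutine layout of Start (derived from the calls above): checkL1InfoTreeUpdate,
// updateProverIdAndFlushId, processPendingL2Blocks, storePendingL2Blocks and checkForcedBatches each
// run in their own goroutine, while finalizeBatches runs in the caller's goroutine, so Start blocks
// until the finalizer loop returns.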
// updateProverIdAndFlushId updates the prover id and flush id
func (f *finalizer) updateProverIdAndFlushId(ctx context.Context) {
for {
f.pendingFlushIDCond.L.Lock()
// If f.storedFlushID >= f.lastPendingFlushID, all pending txs (flush ids) have already been stored by the executor.
// We are "synced" with the flush id, therefore we wait for a new tx (a new pending flush id) to be stored by the executor
for f.storedFlushID >= f.lastPendingFlushID {
f.pendingFlushIDCond.Wait()
}
f.pendingFlushIDCond.L.Unlock()
for f.storedFlushID < f.lastPendingFlushID { //TODO: review this loop, it may be polling all the time with no sleep
storedFlushID, proverID, err := f.stateIntf.GetStoredFlushID(ctx)
if err != nil {
log.Errorf("failed to get stored flush id, error: %v", err)
} else {
if storedFlushID != f.storedFlushID {
// Check if prover/Executor has been restarted
f.checkIfProverRestarted(proverID)
// Update f.storedFlushID and signal the f.storedFlushIDCond condition
f.storedFlushIDCond.L.Lock()
f.storedFlushID = storedFlushID
f.storedFlushIDCond.Broadcast()
f.storedFlushIDCond.L.Unlock()
}
}
}
}
}
// updateFlushIDs updates f.lastPendingFlushID and f.storedFlushID with the newPendingFlushID and newStoredFlushID values (if they have changed)
// and broadcasts the f.pendingFlushIDCond and f.storedFlushIDCond conditions to notify other goroutines that the values have changed
func (f *finalizer) updateFlushIDs(newPendingFlushID, newStoredFlushID uint64) {
if newPendingFlushID > f.lastPendingFlushID {
f.lastPendingFlushID = newPendingFlushID
f.pendingFlushIDCond.Broadcast()
}
f.storedFlushIDCond.L.Lock()
if newStoredFlushID > f.storedFlushID {
f.storedFlushID = newStoredFlushID
f.storedFlushIDCond.Broadcast()
}
f.storedFlushIDCond.L.Unlock()
}
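// Sketch of the flush-id handshake between updateFlushIDs and updateProverIdAndFlushId above
// (a reading aid, not original code):
//   1. a processing goroutine calls updateFlushIDs with a higher pending flush id, which wakes
//      updateProverIdAndFlushId through pendingFlushIDCond;
//   2. updateProverIdAndFlushId polls stateIntf.GetStoredFlushID until the executor has stored
//      everything up to lastPendingFlushID, broadcasting storedFlushIDCond on every advance;
//   3. goroutines waiting on storedFlushIDCond for a given flush id can then proceed.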
func (f *finalizer) checkL1InfoTreeUpdate(ctx context.Context) {
firstL1InfoRootUpdate := true
for {
lastL1BlockNumber, err := f.etherman.GetLatestBlockNumber(ctx)
if err != nil {
log.Errorf("error getting latest L1 block number, error: %v", err)
}
maxBlockNumber := uint64(0)
if f.cfg.L1InfoTreeL1BlockConfirmations <= lastL1BlockNumber {
maxBlockNumber = lastL1BlockNumber - f.cfg.L1InfoTreeL1BlockConfirmations
}
l1InfoRoot, err := f.stateIntf.GetLatestL1InfoRoot(ctx, maxBlockNumber)
if err != nil {
log.Errorf("error checking latest L1InfoRoot, error: %v", err)
continue
}
// L1InfoTreeIndex = 0 is a special case (empty tree) therefore we will set GER as zero
if l1InfoRoot.L1InfoTreeIndex == 0 {
l1InfoRoot.GlobalExitRoot.GlobalExitRoot = state.ZeroHash
}
if firstL1InfoRootUpdate || l1InfoRoot.L1InfoTreeIndex > f.lastL1InfoTree.L1InfoTreeIndex {
firstL1InfoRootUpdate = false
log.Debugf("received new L1InfoRoot. L1InfoTreeIndex: %d", l1InfoRoot.L1InfoTreeIndex)
f.lastL1InfoTreeMux.Lock()
f.lastL1InfoTree = l1InfoRoot
f.lastL1InfoTreeMux.Unlock()
if !f.lastL1InfoTreeValid {
f.lastL1InfoTreeCond.L.Lock()
f.lastL1InfoTreeValid = true
f.lastL1InfoTreeCond.Broadcast()
f.lastL1InfoTreeCond.L.Unlock()
}
}
time.Sleep(f.cfg.L1InfoTreeCheckInterval.Duration)
}
}
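// Worked example of the confirmations window above (illustrative numbers): with
// L1InfoTreeL1BlockConfirmations = 64 and a latest L1 block of 1000, maxBlockNumber = 936, so
// GetLatestL1InfoRoot presumably only returns L1InfoRoots added up to block 936; if the latest
// block were below 64, maxBlockNumber would stay at 0.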
// finalizeBatches runs the endless loop for processing transactions finalizing batches.
func (f *finalizer) finalizeBatches(ctx context.Context) {
log.Debug("finalizer init loop")
showNotFoundTxLog := true // used to log (debug) only the first message when there are no txs to process
for {
// If we have reached the max L2 block time, we close the current L2 block and open a new one
if f.wipL2Block.timestamp+uint64(f.cfg.L2BlockMaxDeltaTimestamp.Seconds()) <= uint64(time.Now().Unix()) {
f.finalizeWIPL2Block(ctx)
}
tx, err := f.workerIntf.GetBestFittingTx(f.wipBatch.imRemainingResources)
// If we have txs pending to process but none of them fits into the wip batch, we close the wip batch and open a new one
if err == ErrNoFittingTransaction {
f.finalizeWIPBatch(ctx, state.NoTxFitsClosingReason)
continue
}
if tx != nil {
showNotFoundTxLog = true
firstTxProcess := true
for {
_, err := f.processTransaction(ctx, tx, firstTxProcess)
if err != nil {
if err == ErrEffectiveGasPriceReprocess {
firstTxProcess = false
log.Infof("reprocessing tx %s because of effective gas price calculation", tx.HashStr)
continue
} else if err == ErrBatchResourceOverFlow {
log.Infof("skipping tx %s due to a batch resource overflow", tx.HashStr)
break
} else {
log.Errorf("failed to process tx %s, error: %v", err)
break
}
}
break
}
} else {
idleTime := time.Now()
if showNotFoundTxLog {
log.Debug("no transactions to be processed. Waiting...")
showNotFoundTxLog = false
}
// wait for new ready txs in worker
f.workerReadyTxsCond.L.Lock()
f.workerReadyTxsCond.WaitOrTimeout(f.cfg.NewTxsWaitInterval.Duration)
f.workerReadyTxsCond.L.Unlock()
// Increase idle time of the WIP L2Block
f.wipL2Block.metrics.idleTime += time.Since(idleTime)
}
if f.haltFinalizer.Load() {
// There is a fatal error and we need to halt the finalizer and stop processing new txs
for {
time.Sleep(5 * time.Second) //nolint:gomnd
}
}
// Check if we must finalize the batch due to a closing reason (resources exhausted, max txs, timestamp resolution, forced batches deadline)
if finalize, closeReason := f.checkIfFinalizeBatch(); finalize {
f.finalizeWIPBatch(ctx, closeReason)
}
if err := ctx.Err(); err != nil {
log.Errorf("stopping finalizer because of context, error: %v", err)
return
}
}
}
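// Shape of the main loop above (reading aid): each iteration (1) closes the WIP L2 block once
// L2BlockMaxDeltaTimestamp has elapsed, (2) asks the worker for the best tx fitting the remaining
// batch resources, closing the WIP batch with NoTxFitsClosingReason when nothing fits, (3) processes
// that tx, retrying when the effective gas price must be recalculated, or waits for new ready txs
// otherwise, and (4) closes the WIP batch whenever checkIfFinalizeBatch reports a closing reason.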
// processTransaction processes a single transaction.
func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker, firstTxProcess bool) (errWg *sync.WaitGroup, err error) {
start := time.Now()
log.Infof("processing tx %s, batchNumber: %d, l2Block: [%d], oldStateRoot: %s, L1InfoRootIndex: %d",
tx.HashStr, f.wipBatch.batchNumber, f.wipL2Block.trackingNum, f.wipBatch.imStateRoot, f.wipL2Block.l1InfoTreeExitRoot.L1InfoTreeIndex)
batchRequest := state.ProcessRequest{
BatchNumber: f.wipBatch.batchNumber,
OldStateRoot: f.wipBatch.imStateRoot,
Coinbase: f.wipBatch.coinbase,
L1InfoRoot_V2: state.GetMockL1InfoRoot(),
TimestampLimit_V2: f.wipL2Block.timestamp,
Caller: stateMetrics.DiscardCallerLabel,
ForkID: f.stateIntf.GetForkIDByBatchNumber(f.wipBatch.batchNumber),
Transactions: tx.RawTx,
SkipFirstChangeL2Block_V2: true,
SkipWriteBlockInfoRoot_V2: true,
SkipVerifyL1InfoRoot_V2: true,
L1InfoTreeData_V2: map[uint32]state.L1DataV2{},
}
txGasPrice := tx.GasPrice
// If it is the first time we process this tx then we calculate the EffectiveGasPrice
if firstTxProcess {
// Get L1 gas price and store in txTracker to make it consistent during the lifespan of the transaction
tx.L1GasPrice, tx.L2GasPrice = f.poolIntf.GetL1AndL2GasPrice()
// Get the tx and l2 gas price we will use in the egp calculation. If egp is disabled we will use a "simulated" tx gas price
txGasPrice, txL2GasPrice := f.effectiveGasPrice.GetTxAndL2GasPrice(tx.GasPrice, tx.L1GasPrice, tx.L2GasPrice)
// Save values for later logging
tx.EGPLog.L1GasPrice = tx.L1GasPrice
tx.EGPLog.L2GasPrice = txL2GasPrice
tx.EGPLog.GasUsedFirst = tx.UsedZKCounters.GasUsed
tx.EGPLog.GasPrice.Set(txGasPrice)
// Calculate EffectiveGasPrice
egp, err := f.effectiveGasPrice.CalculateEffectiveGasPrice(tx.RawTx, txGasPrice, tx.UsedZKCounters.GasUsed, tx.L1GasPrice, txL2GasPrice)
if err != nil {
if f.effectiveGasPrice.IsEnabled() {
return nil, err
} else {
log.Warnf("effectiveGasPrice is disabled, but failed to calculate effectiveGasPrice for tx %s, error: %v", tx.HashStr, err)
tx.EGPLog.Error = fmt.Sprintf("CalculateEffectiveGasPrice#1: %s", err)
}
} else {
tx.EffectiveGasPrice.Set(egp)
// Save first EffectiveGasPrice for later logging
tx.EGPLog.ValueFirst.Set(tx.EffectiveGasPrice)
// If EffectiveGasPrice >= txGasPrice, we process the tx with tx.GasPrice
if tx.EffectiveGasPrice.Cmp(txGasPrice) >= 0 {
loss := new(big.Int).Sub(tx.EffectiveGasPrice, txGasPrice)
// If loss > 0 we log a warning message indicating we lose fees for this tx
if loss.Cmp(new(big.Int).SetUint64(0)) == 1 {
log.Warnf("egp-loss: gasPrice: %d, effectiveGasPrice1: %d, loss: %d, tx: %s", txGasPrice, tx.EffectiveGasPrice, loss, tx.HashStr)
}
tx.EffectiveGasPrice.Set(txGasPrice)
tx.IsLastExecution = true
}
}
}
egpPercentage, err := f.effectiveGasPrice.CalculateEffectiveGasPricePercentage(txGasPrice, tx.EffectiveGasPrice)
if err != nil {
if f.effectiveGasPrice.IsEnabled() {
return nil, err
} else {
log.Warnf("effectiveGasPrice is disabled, but failed to to calculate efftive gas price percentage (#1), error: %v", err)
tx.EGPLog.Error = fmt.Sprintf("%s; CalculateEffectiveGasPricePercentage#1: %s", tx.EGPLog.Error, err)
}
} else {
// Save percentage for later logging
tx.EGPLog.Percentage = egpPercentage
}
// If EGP is disabled we use tx GasPrice (MaxEffectivePercentage=255)
if !f.effectiveGasPrice.IsEnabled() {
egpPercentage = state.MaxEffectivePercentage
}
// Assign applied EGP percentage to tx (TxTracker)
tx.EGPPercentage = egpPercentage
effectivePercentageAsDecodedHex, err := hex.DecodeHex(fmt.Sprintf("%x", tx.EGPPercentage))
if err != nil {
return nil, err
}
batchRequest.Transactions = append(batchRequest.Transactions, effectivePercentageAsDecodedHex...)
executionStart := time.Now()
batchResponse, err := f.stateIntf.ProcessBatchV2(ctx, batchRequest, false)
executionTime := time.Since(executionStart)
f.wipL2Block.metrics.transactionsTimes.executor += executionTime
if err != nil && (errors.Is(err, runtime.ErrExecutorDBError) || errors.Is(err, runtime.ErrInvalidTxChangeL2BlockMinTimestamp)) {
log.Errorf("failed to process tx %s, error: %v", tx.HashStr, err)
return nil, err
} else if err == nil && !batchResponse.IsRomLevelError && len(batchResponse.BlockResponses) == 0 {
err = fmt.Errorf("executor returned no errors and no responses for tx %s", tx.HashStr)
f.Halt(ctx, err, false)
} else if err != nil {
log.Errorf("error received from executor, error: %v", err)
// Delete tx from the worker
f.workerIntf.DeleteTx(tx.Hash, tx.From)
// Set tx as invalid in the pool
errMsg := err.Error()
err = f.poolIntf.UpdateTxStatus(ctx, tx.Hash, pool.TxStatusInvalid, false, &errMsg)
if err != nil {
log.Errorf("failed to update status to invalid in the pool for tx %s, error: %v", tx.Hash.String(), err)
}
return nil, err
}
oldStateRoot := f.wipBatch.imStateRoot
if len(batchResponse.BlockResponses) > 0 {
errWg, err = f.handleProcessTransactionResponse(ctx, tx, batchResponse, oldStateRoot)
if err != nil {
return errWg, err
}
}
// Update imStateRoot
f.wipBatch.imStateRoot = batchResponse.NewStateRoot
log.Infof("processed tx %s, batchNumber: %d, l2Block: [%d], newStateRoot: %s, oldStateRoot: %s, time: {process: %v, executor: %v}, used counters: %s, reserved counters: %s",
tx.HashStr, batchRequest.BatchNumber, f.wipL2Block.trackingNum, batchResponse.NewStateRoot.String(), batchRequest.OldStateRoot.String(),
time.Since(start), executionTime, f.logZKCounters(batchResponse.UsedZkCounters), f.logZKCounters(batchResponse.ReservedZkCounters))
return nil, nil
}
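// Note on the effective gas price percentage handled above (assuming the standard zkEVM encoding):
// the single byte egpPercentage is appended after the raw tx bytes before calling ProcessBatchV2.
// A value of 255 (state.MaxEffectivePercentage) is normally read by the executor as "charge 100% of
// the signed gas price" (roughly (p+1)/256 of it for smaller values), which is why the percentage is
// forced to 255 whenever the effective gas price feature is disabled.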
// handleProcessTransactionResponse handles the response of transaction processing.
func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *TxTracker, result *state.ProcessBatchResponse, oldStateRoot common.Hash) (errWg *sync.WaitGroup, err error) {
txResponse := result.BlockResponses[0].TransactionResponses[0]
// Update metrics
f.wipL2Block.metrics.processedTxsCount++
// Handle Transaction Error
errorCode := executor.RomErrorCode(txResponse.RomError)
if !state.IsStateRootChanged(errorCode) {
// If intrinsic error or OOC error, we skip adding the transaction to the batch
errWg = f.handleProcessTransactionError(ctx, result, tx)
return errWg, txResponse.RomError
}
egpEnabled := f.effectiveGasPrice.IsEnabled()
if !tx.IsLastExecution {
tx.IsLastExecution = true
// Get the tx gas price we will use in the egp calculation. If egp is disabled we will use a "simulated" tx gas price
txGasPrice, txL2GasPrice := f.effectiveGasPrice.GetTxAndL2GasPrice(tx.GasPrice, tx.L1GasPrice, tx.L2GasPrice)
newEffectiveGasPrice, err := f.effectiveGasPrice.CalculateEffectiveGasPrice(tx.RawTx, txGasPrice, txResponse.GasUsed, tx.L1GasPrice, txL2GasPrice)
if err != nil {
if egpEnabled {
log.Errorf("failed to calculate effective gas price with new gasUsed for tx %s, error: %v", tx.HashStr, err.Error())
return nil, err
} else {
log.Warnf("effectiveGasPrice is disabled, but failed to calculate effective gas price with new gasUsed for tx %s, error: %v", tx.HashStr, err.Error())
tx.EGPLog.Error = fmt.Sprintf("%s; CalculateEffectiveGasPrice#2: %s", tx.EGPLog.Error, err)
}
} else {
// Save new (second) gas used and second effective gas price calculation for later logging
tx.EGPLog.ValueSecond.Set(newEffectiveGasPrice)
tx.EGPLog.GasUsedSecond = txResponse.GasUsed
errCompare := f.compareTxEffectiveGasPrice(ctx, tx, newEffectiveGasPrice, txResponse.HasGaspriceOpcode, txResponse.HasBalanceOpcode)
// If EffectiveGasPrice is disabled we will calculate the percentage and save it for later logging
if !egpEnabled {
effectivePercentage, err := f.effectiveGasPrice.CalculateEffectiveGasPricePercentage(txGasPrice, tx.EffectiveGasPrice)
if err != nil {
log.Warnf("effectiveGasPrice is disabled, but failed to calculate effective gas price percentage (#2), error: %v", err)
tx.EGPLog.Error = fmt.Sprintf("%s, CalculateEffectiveGasPricePercentage#2: %s", tx.EGPLog.Error, err)
} else {
// Save percentage for later logging
tx.EGPLog.Percentage = effectivePercentage
}
}
if errCompare != nil && egpEnabled {
return nil, errCompare
}
}
}
// Check if reserved resources of the tx fits in the remaining batch resources
subOverflow := false
fits, overflowResource := f.wipBatch.imRemainingResources.Fits(state.BatchResources{ZKCounters: result.ReservedZkCounters, Bytes: uint64(len(tx.RawTx))})
if fits {
// Subtract the used resources from the batch
subOverflow, overflowResource = f.wipBatch.imRemainingResources.Sub(state.BatchResources{ZKCounters: result.UsedZkCounters, Bytes: uint64(len(tx.RawTx))})
if subOverflow { // Sanity check, this cannot happen as reservedZKCounters should be >= than usedZKCounters
sLog := fmt.Sprintf("tx %s used resources exceeds the remaining batch resources, overflow resource: %s, updating metadata for tx in worker and continuing. Batch counters: %s, tx used counters: %s",
tx.HashStr, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(result.UsedZkCounters))
log.Errorf(sLog)
f.LogEvent(ctx, event.Level_Error, event.EventID_UsedZKCountersOverflow, sLog, nil)
}
} else {
log.Infof("current tx %s reserved resources exceeds the remaining batch resources, overflow resource: %s, updating metadata for tx in worker and continuing. Batch counters: %s, tx reserved counters: %s",
tx.HashStr, overflowResource, f.logZKCounters(f.wipBatch.imRemainingResources.ZKCounters), f.logZKCounters(result.ReservedZkCounters))
if !f.batchConstraints.IsWithinConstraints(result.ReservedZkCounters) {
log.Infof("current tx %s reserved resources exceeds the max limit for batch resources (node OOC), setting tx as invalid in the pool", tx.HashStr)
f.LogEvent(ctx, event.Level_Info, event.EventID_NodeOOC,
fmt.Sprintf("tx %s exceeds node max limit batch resources (node OOC), from: %s, IP: %s", tx.HashStr, tx.FromStr, tx.IP), nil)
// Delete the transaction from the txSorted list
f.workerIntf.DeleteTx(tx.Hash, tx.From)
errMsg := "node OOC"
err = f.poolIntf.UpdateTxStatus(ctx, tx.Hash, pool.TxStatusInvalid, false, &errMsg)
if err != nil {
log.Errorf("failed to update status to invalid in the pool for tx %s, error: %v", tx.Hash.String(), err)
}
return nil, ErrBatchResourceOverFlow
}
}
// If the reserved tx resources don't fit in the remaining batch resources (or we got an overflow when trying to subtract the used resources)
// we update the ZKCounters of the tx and return an ErrBatchResourceOverFlow error
if !fits || subOverflow {
f.workerIntf.UpdateTxZKCounters(txResponse.TxHash, tx.From, result.UsedZkCounters, result.ReservedZkCounters)
return nil, ErrBatchResourceOverFlow
}
// Save Enabled, GasPriceOC, BalanceOC and final effective gas price for later logging
tx.EGPLog.Enabled = egpEnabled
tx.EGPLog.GasPriceOC = txResponse.HasGaspriceOpcode
tx.EGPLog.BalanceOC = txResponse.HasBalanceOpcode
tx.EGPLog.ValueFinal.Set(tx.EffectiveGasPrice)
// Log here the results of EGP calculation
log.Infof("egp-log: final: %d, first: %d, second: %d, percentage: %d, deviation: %d, maxDeviation: %d, gasUsed1: %d, gasUsed2: %d, gasPrice: %d, l1GasPrice: %d, l2GasPrice: %d, reprocess: %t, gasPriceOC: %t, balanceOC: %t, enabled: %t, txSize: %d, tx: %s, error: %s",
tx.EGPLog.ValueFinal, tx.EGPLog.ValueFirst, tx.EGPLog.ValueSecond, tx.EGPLog.Percentage, tx.EGPLog.FinalDeviation, tx.EGPLog.MaxDeviation, tx.EGPLog.GasUsedFirst, tx.EGPLog.GasUsedSecond,
tx.EGPLog.GasPrice, tx.EGPLog.L1GasPrice, tx.EGPLog.L2GasPrice, tx.EGPLog.Reprocess, tx.EGPLog.GasPriceOC, tx.EGPLog.BalanceOC, egpEnabled, len(tx.RawTx), tx.HashStr, tx.EGPLog.Error)
f.wipL2Block.addTx(tx)
f.wipBatch.countOfTxs++
f.updateWorkerAfterSuccessfulProcessing(ctx, tx.Hash, tx.From, false, result)
// Update metrics
f.wipL2Block.metrics.gas += txResponse.GasUsed
return nil, nil
}
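// Reading aid for the resource accounting above (not original code): the remaining batch budget is
// first checked against the tx's reserved counters (worst case) with Fits, while only the used
// counters are actually subtracted with Sub. The subOverflow branch is a sanity check, since used
// counters are expected never to exceed reserved ones; either failure surfaces as
// ErrBatchResourceOverFlow so the caller skips the tx for this batch.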
// compareTxEffectiveGasPrice compares newEffectiveGasPrice with tx.EffectiveGasPrice.
// It returns ErrEffectiveGasPriceReprocess if the tx needs to be reprocessed with
// the tx.EffectiveGasPrice updated, otherwise it returns nil
func (f *finalizer) compareTxEffectiveGasPrice(ctx context.Context, tx *TxTracker, newEffectiveGasPrice *big.Int, hasGasPriceOC bool, hasBalanceOC bool) error {
// Get the tx gas price we will use in the egp calculation. If egp is disabled we will use a "simulated" tx gas price
txGasPrice, _ := f.effectiveGasPrice.GetTxAndL2GasPrice(tx.GasPrice, tx.L1GasPrice, tx.L2GasPrice)
// Compute the absolute difference between tx.EffectiveGasPrice and newEffectiveGasPrice
diff := new(big.Int).Abs(new(big.Int).Sub(tx.EffectiveGasPrice, newEffectiveGasPrice))
// Compute max deviation allowed of newEffectiveGasPrice
maxDeviation := new(big.Int).Div(new(big.Int).Mul(tx.EffectiveGasPrice, new(big.Int).SetUint64(f.effectiveGasPrice.GetFinalDeviation())), big.NewInt(100)) //nolint:gomnd
// Save FinalDeviation (diff) and MaxDeviation for later logging
tx.EGPLog.FinalDeviation.Set(diff)
tx.EGPLog.MaxDeviation.Set(maxDeviation)
// if diff > maxDeviation
if diff.Cmp(maxDeviation) == 1 {
// if newEffectiveGasPrice < txGasPrice
if newEffectiveGasPrice.Cmp(txGasPrice) == -1 {
if hasGasPriceOC || hasBalanceOC {
tx.EffectiveGasPrice.Set(txGasPrice)
} else {
tx.EffectiveGasPrice.Set(newEffectiveGasPrice)
}
} else {
tx.EffectiveGasPrice.Set(txGasPrice)
loss := new(big.Int).Sub(newEffectiveGasPrice, txGasPrice)
// If loss > 0 we log a warning message indicating we lose fees for this tx
if loss.Cmp(new(big.Int).SetUint64(0)) == 1 {
log.Warnf("egp-loss: gasPrice: %d, EffectiveGasPrice2: %d, loss: %d, tx: %s", txGasPrice, newEffectiveGasPrice, loss, tx.HashStr)
}
}
// Save Reprocess for later logging
tx.EGPLog.Reprocess = true
return ErrEffectiveGasPriceReprocess
} // else (diff <= maxDeviation) it is ok, no reprocess of the tx is needed
return nil
}
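// Worked example of the deviation check in compareTxEffectiveGasPrice above (illustrative numbers):
// with tx.EffectiveGasPrice = 100 gwei and GetFinalDeviation() = 10, maxDeviation = 10 gwei. A
// recalculated price of 105 gwei (diff = 5) needs no reprocess, while 115 gwei (diff = 15) returns
// ErrEffectiveGasPriceReprocess with tx.EffectiveGasPrice updated, capped at the tx gas price when
// the new value would exceed it or when the tx reads the gas price or balance through opcodes.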
func (f *finalizer) updateWorkerAfterSuccessfulProcessing(ctx context.Context, txHash common.Hash, txFrom common.Address, isForced bool, result *state.ProcessBatchResponse) {
// Delete the transaction from the worker
if isForced {
f.workerIntf.DeleteForcedTx(txHash, txFrom)
log.Debugf("forced tx %s deleted from address %s", txHash.String(), txFrom.Hex())
return
} else {
f.workerIntf.DeleteTx(txHash, txFrom)
log.Debugf("tx %s deleted from address %s", txHash.String(), txFrom.Hex())
}
txsToDelete := f.workerIntf.UpdateAfterSingleSuccessfulTxExecution(txFrom, result.ReadWriteAddresses)
for _, txToDelete := range txsToDelete {
err := f.poolIntf.UpdateTxStatus(ctx, txToDelete.Hash, pool.TxStatusFailed, false, txToDelete.FailedReason)
if err != nil {
log.Errorf("failed to update status to failed in the pool for tx %s, error: %v", txToDelete.Hash.String(), err)
continue
}
}
}
// handleProcessTransactionError handles the error of a transaction
func (f *finalizer) handleProcessTransactionError(ctx context.Context, result *state.ProcessBatchResponse, tx *TxTracker) *sync.WaitGroup {
txResponse := result.BlockResponses[0].TransactionResponses[0]
errorCode := executor.RomErrorCode(txResponse.RomError)
addressInfo := result.ReadWriteAddresses[tx.From]
log.Infof("rom error in tx %s, errorCode: %d", tx.HashStr, errorCode)
wg := new(sync.WaitGroup)
failedReason := executor.RomErr(errorCode).Error()
if executor.IsROMOutOfCountersError(errorCode) {
log.Errorf("ROM out of counters error, marking tx %s as invalid, errorCode: %d", tx.HashStr, errorCode)
f.workerIntf.DeleteTx(tx.Hash, tx.From)
wg.Add(1)
go func() {
defer wg.Done()
err := f.poolIntf.UpdateTxStatus(ctx, tx.Hash, pool.TxStatusInvalid, false, &failedReason)
if err != nil {
log.Errorf("failed to update status to invalid in the pool for tx %s, error: %v", tx.HashStr, err)
}
}()
} else if executor.IsInvalidNonceError(errorCode) || executor.IsInvalidBalanceError(errorCode) {
var (
nonce *uint64
balance *big.Int
)
if addressInfo != nil {
nonce = addressInfo.Nonce
balance = addressInfo.Balance
}
log.Errorf("intrinsic error, moving tx %s to not ready: nonce: %d, balance: %d. gasPrice: %d, error: %v", tx.Hash, nonce, balance, tx.GasPrice, txResponse.RomError)
txsToDelete := f.workerIntf.MoveTxToNotReady(tx.Hash, tx.From, nonce, balance)
for _, txToDelete := range txsToDelete {
wg.Add(1)
txToDelete := txToDelete
go func() {
defer wg.Done()
err := f.poolIntf.UpdateTxStatus(ctx, txToDelete.Hash, pool.TxStatusFailed, false, &failedReason)
if err != nil {
log.Errorf("failed to update status to failed in the pool for tx %s, error: %v", txToDelete.Hash.String(), err)
}
}()
}
} else {
// Delete the transaction from the txSorted list
f.workerIntf.DeleteTx(tx.Hash, tx.From)
log.Debugf("tx %s deleted from txSorted list", tx.HashStr)
wg.Add(1)
go func() {
defer wg.Done()
// Update the status of the transaction to failed
err := f.poolIntf.UpdateTxStatus(ctx, tx.Hash, pool.TxStatusFailed, false, &failedReason)
if err != nil {
log.Errorf("failed to update status to failed in the pool for tx %s, error: %v", tx.Hash.String(), err)
}
}()
}
// Update metrics
f.wipL2Block.metrics.gas += txResponse.GasUsed
return wg
}
// checkIfProverRestarted checks if the proverID changed
func (f *finalizer) checkIfProverRestarted(proverID string) {
if f.proverID != "" && f.proverID != proverID {
f.LogEvent(context.Background(), event.Level_Critical, event.EventID_FinalizerRestart,
fmt.Sprintf("proverID changed from %s to %s, restarting sequencer to discard current WIP batch and work with new executor", f.proverID, proverID), nil)
log.Fatal("proverID changed from %s to %s, restarting sequencer to discard current WIP batch and work with new executor")
}
}
// logZKCounters returns a string with all the zkCounters values
func (f *finalizer) logZKCounters(counters state.ZKCounters) string {
return fmt.Sprintf("{gasUsed: %d, keccakHashes: %d, poseidonHashes: %d, poseidonPaddings: %d, memAligns: %d, arithmetics: %d, binaries: %d, sha256Hashes: %d, steps: %d}",
counters.GasUsed, counters.KeccakHashes, counters.PoseidonHashes, counters.PoseidonPaddings, counters.MemAligns, counters.Arithmetics,
counters.Binaries, counters.Sha256Hashes_V2, counters.Steps)
}
// Halt halts the finalizer
func (f *finalizer) Halt(ctx context.Context, err error, isFatal bool) {
f.haltFinalizer.Store(true)
f.LogEvent(ctx, event.Level_Critical, event.EventID_FinalizerHalt, fmt.Sprintf("finalizer halted due to error, error: %s", err), nil)
if isFatal {
log.Fatalf("fatal error on finalizer, error: %v", err)
} else {
for {
log.Errorf("halting finalizer, error: %v", err)
time.Sleep(5 * time.Second) //nolint:gomnd
}
}
}
// LogEvent adds an event for runtime debugging
func (f *finalizer) LogEvent(ctx context.Context, level event.Level, eventId event.EventID, description string, json interface{}) {
event := &event.Event{
ReceivedAt: time.Now(),
Source: event.Source_Node,
Component: event.Component_Sequencer,
Level: level,
EventID: eventId,
Description: description,
}
if json != nil {
event.Json = json
}
eventErr := f.eventLog.LogEvent(ctx, event)
if eventErr != nil {
log.Errorf("error storing log event, error: %v", eventErr)
}
}