Skip to content

Commit 0e75a28

Browse files
fix: 0 tx blocks after limbo (0xPolygonHermez#1708) (0xPolygonHermez#1711)
* fix: 0 tx blocks after limbo * Add executor-enabled flag. When set to false, it disables use of the remote executor even when the executor-urls option is filled in; this is helpful when testing limbo mode, where txs would otherwise still be sent to the remote executor.
1 parent 157d43c commit 0e75a28

9 files changed

+20
-11
lines changed

cmd/utils/flags.go

+5
Original file line numberDiff line numberDiff line change
@@ -596,6 +596,11 @@ var (
596596
Usage: "A comma separated list of grpc addresses that host executors",
597597
Value: "",
598598
}
599+
ExecutorEnabled = cli.BoolFlag{
600+
Name: "zkevm.executor-enabled",
601+
Usage: "Enables the executor. Used for testing limbo, when executor-urls are set, but we don't want to use them, only in limbo to verify limbo transactions. For this case, set it to false. Defaulted to true",
602+
Value: true,
603+
}
599604
ExecutorStrictMode = cli.BoolFlag{
600605
Name: "zkevm.executor-strict",
601606
Usage: "Defaulted to true to ensure you must set some executor URLs, bypass this restriction by setting to false",

eth/ethconfig/config_zkevm.go

+5
Original file line numberDiff line numberDiff line change
@@ -49,6 +49,7 @@ type Zk struct {
4949
ExecutorUrls []string
5050
ExecutorStrictMode bool
5151
ExecutorRequestTimeout time.Duration
52+
ExecutorEnabled bool
5253
DatastreamNewBlockTimeout time.Duration
5354
WitnessMemdbSize datasize.ByteSize
5455
WitnessUnwindLimit uint64
@@ -113,6 +114,10 @@ func (c *Zk) HasExecutors() bool {
113114
return len(c.ExecutorUrls) > 0 && c.ExecutorUrls[0] != ""
114115
}
115116

117+
func (c *Zk) UseExecutors() bool {
118+
return c.HasExecutors() && c.ExecutorEnabled
119+
}
120+
116121
// ShouldImportInitialBatch returns true in case initial batch config file name is non-empty string.
117122
func (c *Zk) ShouldImportInitialBatch() bool {
118123
return c.InitialBatchCfgFile != ""

turbo/cli/default_flags.go

+1
Original file line numberDiff line numberDiff line change
@@ -217,6 +217,7 @@ var DefaultFlags = []cli.Flag{
217217
&utils.ExecutorUrls,
218218
&utils.ExecutorStrictMode,
219219
&utils.ExecutorRequestTimeout,
220+
&utils.ExecutorEnabled,
220221
&utils.DatastreamNewBlockTimeout,
221222
&utils.WitnessMemdbSize,
222223
&utils.WitnessUnwindLimit,

turbo/cli/flags_zkevm.go

+3-1
Original file line numberDiff line numberDiff line change
@@ -192,6 +192,7 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) {
192192
ExecutorUrls: strings.Split(strings.ReplaceAll(ctx.String(utils.ExecutorUrls.Name), " ", ""), ","),
193193
ExecutorStrictMode: ctx.Bool(utils.ExecutorStrictMode.Name),
194194
ExecutorRequestTimeout: ctx.Duration(utils.ExecutorRequestTimeout.Name),
195+
ExecutorEnabled: ctx.Bool(utils.ExecutorEnabled.Name),
195196
DatastreamNewBlockTimeout: ctx.Duration(utils.DatastreamNewBlockTimeout.Name),
196197
WitnessMemdbSize: *witnessMemSize,
197198
WitnessUnwindLimit: witnessUnwindLimit,
@@ -244,6 +245,7 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) {
244245
} else {
245246
checkFlag(utils.ExecutorUrls.Name, cfg.ExecutorUrls)
246247
checkFlag(utils.ExecutorStrictMode.Name, cfg.ExecutorStrictMode)
248+
checkFlag(utils.ExecutorEnabled.Name, cfg.ExecutorEnabled)
247249
checkFlag(utils.DataStreamHost.Name, cfg.DataStreamHost)
248250
checkFlag(utils.DataStreamPort.Name, cfg.DataStreamPort)
249251
checkFlag(utils.DataStreamWriteTimeout.Name, cfg.DataStreamWriteTimeout)
@@ -261,7 +263,7 @@ func ApplyFlagsForZkConfig(ctx *cli.Context, cfg *ethconfig.Config) {
261263
panic("You cannot disable virtual counters when running in strict mode")
262264
}
263265

264-
if len(cfg.ExecutorUrls) > 0 && cfg.ExecutorUrls[0] != "" && cfg.DisableVirtualCounters {
266+
if cfg.UseExecutors() && cfg.DisableVirtualCounters {
265267
panic("You cannot disable virtual counters when running with executors")
266268
}
267269
}

turbo/stages/zk_stages.go

+2-2
Original file line numberDiff line numberDiff line change
@@ -54,7 +54,7 @@ func NewDefaultZkStages(ctx context.Context,
5454
zkStages.StageL1SyncerCfg(db, l1Syncer, cfg.Zk),
5555
zkStages.StageL1InfoTreeCfg(db, cfg.Zk, infoTreeUpdater),
5656
zkStages.StageBatchesCfg(db, datastreamClient, cfg.Zk, controlServer.ChainConfig, &cfg.Miner),
57-
zkStages.StageDataStreamCatchupCfg(dataStreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()),
57+
zkStages.StageDataStreamCatchupCfg(dataStreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion),
5858
stagedsync.StageBlockHashesCfg(db, dirs.Tmp, controlServer.ChainConfig, blockWriter),
5959
stagedsync.StageSendersCfg(db, controlServer.ChainConfig, false, dirs.Tmp, cfg.Prune, blockReader, controlServer.Hd, nil),
6060
stagedsync.StageExecuteBlocksCfg(
@@ -121,7 +121,7 @@ func NewSequencerZkStages(ctx context.Context,
121121
zkStages.StageL1SequencerSyncCfg(db, cfg.Zk, sequencerStageSyncer),
122122
zkStages.StageL1InfoTreeCfg(db, cfg.Zk, infoTreeUpdater),
123123
zkStages.StageSequencerL1BlockSyncCfg(db, cfg.Zk, l1BlockSyncer),
124-
zkStages.StageDataStreamCatchupCfg(dataStreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion, cfg.HasExecutors()),
124+
zkStages.StageDataStreamCatchupCfg(dataStreamServer, db, cfg.Genesis.Config.ChainID.Uint64(), cfg.DatastreamVersion),
125125
zkStages.StageSequenceBlocksCfg(
126126
db,
127127
cfg.Prune,

zk/stages/stage_data_stream_catch_up.go

+1-3
Original file line numberDiff line numberDiff line change
@@ -18,15 +18,13 @@ type DataStreamCatchupCfg struct {
1818
db kv.RwDB
1919
dataStreamServer server.DataStreamServer
2020
streamVersion int
21-
hasExecutors bool
2221
}
2322

24-
func StageDataStreamCatchupCfg(dataStreamServer server.DataStreamServer, db kv.RwDB, chainId uint64, streamVersion int, hasExecutors bool) DataStreamCatchupCfg {
23+
func StageDataStreamCatchupCfg(dataStreamServer server.DataStreamServer, db kv.RwDB, chainId uint64, streamVersion int) DataStreamCatchupCfg {
2524
return DataStreamCatchupCfg{
2625
dataStreamServer: dataStreamServer,
2726
db: db,
2827
streamVersion: streamVersion,
29-
hasExecutors: hasExecutors,
3028
}
3129
}
3230

zk/stages/stage_data_stream_catch_up_test.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -89,7 +89,7 @@ func TestSpawnStageDataStreamCatchup(t *testing.T) {
8989

9090
dataStreamServerMock.EXPECT().WriteBlocksToStreamConsecutively(ctx, s.LogPrefix(), tx1, hDBReaderMatcher, uint64(1), uint64(20)).Return(nil)
9191

92-
cfg := StageDataStreamCatchupCfg(dataStreamServerMock, db1, chainID, streamVersion, true)
92+
cfg := StageDataStreamCatchupCfg(dataStreamServerMock, db1, chainID, streamVersion)
9393

9494
// Act
9595
err = SpawnStageDataStreamCatchup(s, ctx, tx1, cfg)

zk/stages/stage_sequence_execute.go

+1-1
Original file line numberDiff line numberDiff line change
@@ -117,7 +117,7 @@ func sequencingBatchStep(
117117
var block *types.Block
118118
runLoopBlocks := true
119119
batchContext := newBatchContext(ctx, &cfg, &historyCfg, s, sdb)
120-
batchState := newBatchState(forkId, batchNumberForStateInitialization, executionAt+1, cfg.zk.HasExecutors(), cfg.zk.L1SyncStartBlock > 0, cfg.txPool, resequenceBatchJob)
120+
batchState := newBatchState(forkId, batchNumberForStateInitialization, executionAt+1, cfg.zk.UseExecutors(), cfg.zk.L1SyncStartBlock > 0, cfg.txPool, resequenceBatchJob)
121121
blockDataSizeChecker := NewBlockDataChecker(cfg.zk.ShouldCountersBeUnlimited(batchState.isL1Recovery()))
122122
streamWriter := newSequencerBatchStreamWriter(batchContext, batchState)
123123

zk/txpool/pool.go

+1-3
Original file line numberDiff line numberDiff line change
@@ -395,8 +395,6 @@ func New(newTxs chan types.Announcements, coreDB kv.RoDB, cfg txpoolcfg.Config,
395395
func (p *TxPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChangeBatch, unwindTxs, minedTxs types.TxSlots, tx kv.Tx) error {
396396
defer newBlockTimer.UpdateDuration(time.Now())
397397

398-
isAfterLimbo := len(unwindTxs.Txs) > 0 && p.isDeniedYieldingTransactions()
399-
400398
cache := p.cache()
401399
cache.OnNewBlock(stateChanges)
402400
coreTx, err := p.coreDB().BeginRo(ctx)
@@ -524,7 +522,7 @@ func (p *TxPool) OnNewBlock(ctx context.Context, stateChanges *remote.StateChang
524522
log.Info("[txpool] Discarding", "tx-hash", hexutils.BytesToHex(slot.IDHash[:]))
525523
}
526524
p.finalizeLimboOnNewBlock(limboTxs)
527-
if isAfterLimbo {
525+
if p.isDeniedYieldingTransactions() {
528526
p.allowYieldingTransactions()
529527
}
530528

0 commit comments

Comments
 (0)