@@ -129,8 +129,10 @@ import (
 	"github.com/ledgerwatch/erigon/turbo/stages/headerdownload"
 	"github.com/ledgerwatch/erigon/zk/contracts"
 	"github.com/ledgerwatch/erigon/zk/datastream/client"
+	"github.com/ledgerwatch/erigon/zk/datastream/server"
 	"github.com/ledgerwatch/erigon/zk/hermez_db"
 	"github.com/ledgerwatch/erigon/zk/l1_cache"
+	"github.com/ledgerwatch/erigon/zk/l1infotree"
 	"github.com/ledgerwatch/erigon/zk/legacy_executor_verifier"
 	zkStages "github.com/ledgerwatch/erigon/zk/stages"
 	"github.com/ledgerwatch/erigon/zk/syncer"
@@ -139,9 +141,10 @@ import (
 	"github.com/ledgerwatch/erigon/zk/utils"
 	"github.com/ledgerwatch/erigon/zk/witness"
 	"github.com/ledgerwatch/erigon/zkevm/etherman"
-	"github.com/ledgerwatch/erigon/zk/l1infotree"
 )

+var dataStreamServerFactory = server.NewZkEVMDataStreamServerFactory()
+
 // Config contains the configuration options of the ETH protocol.
 // Deprecated: use ethconfig.Config instead.
 type Config = ethconfig.Config
@@ -219,7 +222,7 @@ type Ethereum struct {
 	logger log.Logger

 	// zk
-	dataStream *datastreamer.StreamServer
+	streamServer server.StreamServer
 	l1Syncer *syncer.L1Syncer
 	etherManClients []*etherman.Client
 	l1Cache *l1_cache.L1Cache
@@ -977,16 +980,17 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
 		Level:   "warn",
 		Outputs: nil,
 	}
+
 	// todo [zkevm] read the stream version from config and figure out what system id is used for
-	backend.dataStream, err = datastreamer.NewServer(uint16(httpCfg.DataStreamPort), uint8(backend.config.DatastreamVersion), 1, datastreamer.StreamType(1), file, httpCfg.DataStreamWriteTimeout, httpCfg.DataStreamInactivityTimeout, httpCfg.DataStreamInactivityCheckInterval, logConfig)
+	backend.streamServer, err = dataStreamServerFactory.CreateStreamServer(uint16(httpCfg.DataStreamPort), uint8(backend.config.DatastreamVersion), 1, datastreamer.StreamType(1), file, httpCfg.DataStreamWriteTimeout, httpCfg.DataStreamInactivityTimeout, httpCfg.DataStreamInactivityCheckInterval, logConfig)
 	if err != nil {
 		return nil, err
 	}

 	// recovery here now, if the stream got into a bad state we want to be able to delete the file and have
 	// the stream re-populated from scratch. So we check the stream for the latest header and if it is
 	// 0 we can just set the datastream progress to 0 also which will force a re-population of the stream
-	latestHeader := backend.dataStream.GetHeader()
+	latestHeader := backend.streamServer.GetHeader()
 	if latestHeader.TotalEntries == 0 {
 		log.Info("[dataStream] setting the stream progress to 0")
 		backend.preStartTasks.WarmUpDataStream = true
@@ -1100,6 +1104,11 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger

 	l1InfoTreeUpdater := l1infotree.NewUpdater(cfg.Zk, l1InfoTreeSyncer)

+	var dataStreamServer server.DataStreamServer
+	if backend.streamServer != nil {
+		dataStreamServer = dataStreamServerFactory.CreateDataStreamServer(backend.streamServer, backend.chainConfig.ChainID.Uint64())
+	}
+
 	if isSequencer {
 		// if we are sequencing transactions, we do the sequencing loop...
 		witnessGenerator := witness.NewGenerator(
@@ -1129,10 +1138,9 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
 		verifier := legacy_executor_verifier.NewLegacyExecutorVerifier(
 			*cfg.Zk,
 			legacyExecutors,
-			backend.chainConfig,
 			backend.chainDB,
 			witnessGenerator,
-			backend.dataStream,
+			dataStreamServer,
 		)

 		if cfg.Zk.Limbo {
@@ -1167,7 +1175,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
 			backend.agg,
 			backend.forkValidator,
 			backend.engine,
-			backend.dataStream,
+			dataStreamServer,
 			backend.l1Syncer,
 			seqVerSyncer,
 			l1BlockSyncer,
@@ -1209,7 +1217,7 @@ func New(ctx context.Context, stack *node.Node, config *ethconfig.Config, logger
 			backend.engine,
 			backend.l1Syncer,
 			streamClient,
-			backend.dataStream,
+			dataStreamServer,
 			l1InfoTreeUpdater,
 		)

@@ -1330,7 +1338,11 @@ func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config, chainConfig
 	// apiList := jsonrpc.APIList(chainKv, borDb, ethRpcClient, txPoolRpcClient, miningRpcClient, ff, stateCache, blockReader, backend.agg, httpRpcCfg, backend.engine, config, backend.l1Syncer)
 	// authApiList := jsonrpc.AuthAPIList(chainKv, ethRpcClient, txPoolRpcClient, miningRpcClient, ff, stateCache, blockReader, backend.agg, httpRpcCfg, backend.engine, config)

-	s.apiList = jsonrpc.APIList(chainKv, ethRpcClient, txPoolRpcClient, s.txPool2, miningRpcClient, ff, stateCache, blockReader, s.agg, &httpRpcCfg, s.engine, config, s.l1Syncer, s.logger, s.dataStream)
+	var dataStreamServer server.DataStreamServer
+	if s.streamServer != nil {
+		dataStreamServer = dataStreamServerFactory.CreateDataStreamServer(s.streamServer, config.Zk.L2ChainId)
+	}
+	s.apiList = jsonrpc.APIList(chainKv, ethRpcClient, txPoolRpcClient, s.txPool2, miningRpcClient, ff, stateCache, blockReader, s.agg, &httpRpcCfg, s.engine, config, s.l1Syncer, s.logger, dataStreamServer)

 	if config.SilkwormRpcDaemon && httpRpcCfg.Enabled {
 		interface_log_settings := silkworm.RpcInterfaceLogSettings{
@@ -1368,7 +1380,7 @@ func (s *Ethereum) Init(stack *node.Node, config *ethconfig.Config, chainConfig
 	}

 	go func() {
-		if err := cli.StartDataStream(s.dataStream); err != nil {
+		if err := cli.StartDataStream(s.streamServer); err != nil {
 			log.Error(err.Error())
 			return
 		}
@@ -1391,8 +1403,9 @@ func (s *Ethereum) PreStart() error {
 	// we don't know when the server has actually started as it doesn't expose a signal that is has spun up
 	// so here we loop and take a brief pause waiting for it to be ready
 	attempts := 0
+	dataStreamServer := dataStreamServerFactory.CreateDataStreamServer(s.streamServer, s.chainConfig.ChainID.Uint64())
 	for {
-		_, err = zkStages.CatchupDatastream(s.sentryCtx, "stream-catchup", tx, s.dataStream, s.chainConfig.ChainID.Uint64())
+		_, err = zkStages.CatchupDatastream(s.sentryCtx, "stream-catchup", tx, dataStreamServer)
 		if err != nil {
 			if errors.Is(err, datastreamer.ErrAtomicOpNotAllowed) {
 				attempts++
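Taken together, the hunks above swap the concrete *datastreamer.StreamServer for an interface obtained from server.NewZkEVMDataStreamServerFactory(), then wrap that stream server together with a chain ID into a DataStreamServer before handing it to the verifier, the stages, and the RPC API list. Below is a minimal, self-contained Go sketch of that wiring pattern; every type, function, and value in it is an illustrative stand-in, not the actual erigon or zkevm-data-streamer API.

package main

import "fmt"

// header models only the field the diff inspects on the stream header.
type header struct{ TotalEntries uint64 }

// streamServer stands in for the wire-level server that the diff now keeps
// behind an interface (server.StreamServer) instead of a concrete
// *datastreamer.StreamServer.
type streamServer interface {
	GetHeader() header
}

// dataStreamServer stands in for the chain-aware wrapper (server.DataStreamServer)
// that the verifier, the stages, and the RPC layer now receive.
type dataStreamServer struct {
	srv     streamServer
	chainID uint64
}

// factory mirrors the role of the package-level dataStreamServerFactory added
// by the diff: the single place that knows how to wrap a stream server.
type factory struct{}

func (factory) createDataStreamServer(srv streamServer, chainID uint64) *dataStreamServer {
	return &dataStreamServer{srv: srv, chainID: chainID}
}

// fakeServer is a trivial in-memory implementation, only here so the sketch runs.
type fakeServer struct{ entries uint64 }

func (f fakeServer) GetHeader() header { return header{TotalEntries: f.entries} }

func main() {
	var srv streamServer = fakeServer{entries: 0}

	// As in the diff, the wrapper is only built when a stream server exists.
	var dss *dataStreamServer
	if srv != nil {
		dss = factory{}.createDataStreamServer(srv, 1101)
	}

	// An empty stream (TotalEntries == 0) is the condition the diff uses to
	// force the datastream to be re-populated from scratch during warm-up.
	fmt.Println("warm-up needed:", dss.srv.GetHeader().TotalEntries == 0, "chainID:", dss.chainID)
}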