From 517d5cc12525cdf42721e315e6ee8ca3f55e8308 Mon Sep 17 00:00:00 2001 From: Joan Esteban <129153821+joanestebanr@users.noreply.github.com> Date: Wed, 26 Jul 2023 09:57:48 +0200 Subject: [PATCH] Release/v0.2.0 to develop (#2322) Merge release/0.2.0 --- .github/workflows/jsonschema.yml | 61 + .gitignore | 4 +- Dockerfile | 3 +- Makefile | 57 +- cmd/jsonschema.go | 25 + cmd/main.go | 23 + cmd/run.go | 13 +- config/config.go | 82 +- config/config_test.go | 32 +- config/default.go | 9 +- .../environments/local/local.node.config.toml | 10 +- .../mainnet/public.prover.config.json | 7 +- .../testnet/testnet.prover.config.json | 7 +- config/gen_json_schema.go | 249 ++ config/gen_json_schema_test.go | 354 ++ config/network.go | 48 +- config/types/duration.go | 19 +- db/config.go | 8 +- db/migrations/state/0007.sql | 2 +- docker-compose.yml | 2 +- .../custom_network-config-doc.html | 1 + docs/config-file/custom_network-config-doc.md | 121 + .../custom_network-config-schema.json | 88 + docs/config-file/node-config-doc.html | 67 + docs/config-file/node-config-doc.md | 3089 +++++++++++++++++ docs/config-file/node-config-schema.json | 1256 +++++++ docs/config-file/schema_doc.css | 192 + docs/config-file/schema_doc.min.js | 1 + docs/config-file/templates/js/badge_type.html | 14 + docs/config-file/templates/js/base.html | 42 + .../templates/js/breadcrumbs_no_object.html | 10 + .../templates/js/breadcrumbs_object.html | 15 + docs/config-file/templates/js/content.html | 120 + .../templates/js/macro_restriction.html | 3 + docs/config-file/templates/js/schema_doc.css | 192 + docs/config-file/templates/js/schema_doc.js | 66 + .../templates/js/schema_doc.min.js | 1 + .../templates/js/section_array.html | 36 + .../js/section_conditional_subschema.html | 58 + .../templates/js/section_description.html | 16 + .../templates/js/section_examples.html | 18 + .../config-file/templates/js/section_not.html | 8 + .../templates/js/section_properties_2.html | 4 + .../js/section_properties_object.html 
| 60 + ...tion_undocumented_required_properties.html | 11 + .../templates/js/tabbed_section.html | 25 + docs/config-file/templates/md/base.md | 17 + docs/config-file/templates/md/breadcrumbs.md | 24 + docs/config-file/templates/md/content.md | 111 + .../templates/md/generate_toml_example.md | 22 + .../config-file/templates/md/section_array.md | 38 + .../md/section_conditional_subschema.md | 24 + .../templates/md/section_description.md | 4 + .../templates/md/section_examples.md | 16 + docs/config-file/templates/md/section_not.md | 4 + .../templates/md/section_one_of.md | 4 + .../md/section_properties_details.md | 32 + ...ection_undocumented_required_properties.md | 7 + .../templates/md/tabbed_section.md | 11 + docs/configuration.md | 42 + docs/snap_restore.md | 85 + etherman/config.go | 13 +- etherman/etherman.go | 10 +- etherman/etherscan/etherscan.go | 4 +- event/event.go | 3 +- event/eventlog.go | 4 +- gasprice/config.go | 5 +- gasprice/follower.go | 7 +- gasprice/follower_test.go | 21 + go.mod | 14 + go.sum | 44 + jsonrpc/endpoints_eth.go | 12 +- jsonrpc/endpoints_eth_test.go | 2 +- jsonrpc/types/types.go | 11 +- log/config.go | 8 +- metrics/config.go | 18 +- pool/config.go | 15 - pool/errors.go | 3 - pool/pgpoolstorage/pgpoolstorage.go | 8 +- pool/pool.go | 51 +- pool/pool_test.go | 18 +- proto/src/proto/executor/v1/executor.proto | 4 +- sequencer/addrqueue.go | 33 +- sequencer/addrqueue_test.go | 79 +- sequencer/closingsignalsmanager_test.go | 3 +- sequencer/config.go | 18 +- sequencer/dbmanager.go | 37 +- sequencer/dbmanager_test.go | 4 +- sequencer/effective_gas_price.go | 124 +- sequencer/effective_gas_price_test.go | 62 +- sequencer/errors.go | 11 + sequencer/finalizer.go | 160 +- sequencer/finalizer_test.go | 37 +- sequencer/interfaces.go | 14 +- sequencer/mock_db_manager.go | 68 +- sequencer/mock_pool.go | 55 +- sequencer/mock_state.go | 28 +- sequencer/mock_worker.go | 18 +- sequencer/txtracker.go | 2 +- sequencer/worker.go | 28 +- 
sequencer/worker_test.go | 2 +- state/batch.go | 45 +- state/converters.go | 33 +- state/genesis.go | 8 +- state/helper.go | 26 +- state/helper_test.go | 35 + state/pgstatestorage.go | 2 +- state/runtime/executor/client.go | 5 +- state/runtime/executor/errors.go | 292 +- .../runtime/executor/{pb => }/executor.pb.go | 105 +- .../executor/{pb => }/executor_grpc.pb.go | 2 +- state/runtime/runtime.go | 2 + state/state.go | 6 +- state/state_test.go | 57 +- state/transaction.go | 34 +- synchronizer/interfaces.go | 7 +- synchronizer/mock_state.go | 80 +- synchronizer/synchronizer.go | 191 +- synchronizer/synchronizer_test.go | 53 +- test/config/debug.node.config.toml | 10 +- test/config/test.node.config.toml | 9 +- .../test.permissionless.prover.config.json | 10 +- test/config/test.prover.config.json | 10 +- test/docker-compose.yml | 18 +- tools/executor/main.go | 7 +- 125 files changed, 8237 insertions(+), 838 deletions(-) create mode 100644 .github/workflows/jsonschema.yml create mode 100644 cmd/jsonschema.go create mode 100644 config/gen_json_schema.go create mode 100644 config/gen_json_schema_test.go create mode 100644 docs/config-file/custom_network-config-doc.html create mode 100644 docs/config-file/custom_network-config-doc.md create mode 100644 docs/config-file/custom_network-config-schema.json create mode 100644 docs/config-file/node-config-doc.html create mode 100644 docs/config-file/node-config-doc.md create mode 100644 docs/config-file/node-config-schema.json create mode 100644 docs/config-file/schema_doc.css create mode 100644 docs/config-file/schema_doc.min.js create mode 100644 docs/config-file/templates/js/badge_type.html create mode 100644 docs/config-file/templates/js/base.html create mode 100644 docs/config-file/templates/js/breadcrumbs_no_object.html create mode 100644 docs/config-file/templates/js/breadcrumbs_object.html create mode 100644 docs/config-file/templates/js/content.html create mode 100644 
docs/config-file/templates/js/macro_restriction.html create mode 100644 docs/config-file/templates/js/schema_doc.css create mode 100644 docs/config-file/templates/js/schema_doc.js create mode 100644 docs/config-file/templates/js/schema_doc.min.js create mode 100644 docs/config-file/templates/js/section_array.html create mode 100644 docs/config-file/templates/js/section_conditional_subschema.html create mode 100644 docs/config-file/templates/js/section_description.html create mode 100644 docs/config-file/templates/js/section_examples.html create mode 100644 docs/config-file/templates/js/section_not.html create mode 100644 docs/config-file/templates/js/section_properties_2.html create mode 100644 docs/config-file/templates/js/section_properties_object.html create mode 100644 docs/config-file/templates/js/section_undocumented_required_properties.html create mode 100644 docs/config-file/templates/js/tabbed_section.html create mode 100644 docs/config-file/templates/md/base.md create mode 100644 docs/config-file/templates/md/breadcrumbs.md create mode 100644 docs/config-file/templates/md/content.md create mode 100644 docs/config-file/templates/md/generate_toml_example.md create mode 100644 docs/config-file/templates/md/section_array.md create mode 100644 docs/config-file/templates/md/section_conditional_subschema.md create mode 100644 docs/config-file/templates/md/section_description.md create mode 100644 docs/config-file/templates/md/section_examples.md create mode 100644 docs/config-file/templates/md/section_not.md create mode 100644 docs/config-file/templates/md/section_one_of.md create mode 100644 docs/config-file/templates/md/section_properties_details.md create mode 100644 docs/config-file/templates/md/section_undocumented_required_properties.md create mode 100644 docs/config-file/templates/md/tabbed_section.md create mode 100644 docs/configuration.md create mode 100644 docs/snap_restore.md rename state/runtime/executor/{pb => }/executor.pb.go (95%) rename 
state/runtime/executor/{pb => }/executor_grpc.pb.go (99%) diff --git a/.github/workflows/jsonschema.yml b/.github/workflows/jsonschema.yml new file mode 100644 index 0000000000..9caaa30aae --- /dev/null +++ b/.github/workflows/jsonschema.yml @@ -0,0 +1,61 @@ +--- +name: JSON schema +on: + push: + branches: + - main + - master + - develop + - update-external-dependencies + - 'release/**' + pull_request: + +jobs: + json-schema: + strategy: + matrix: + go-version: [ 1.19.x ] + goarch: [ "amd64" ] + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v3 + # https://github.com/actions/checkout#Checkout-pull-request-HEAD-commit-instead-of-merge-commit + # Checkout pull request HEAD commit instead of merge commit + with: + ref: ${{ github.event.pull_request.head.sha }} + + - name: Install Go + uses: actions/setup-go@v3 + with: + go-version: ${{ matrix.go-version }} + env: + GOARCH: ${{ matrix.goarch }} + + - uses: actions/setup-python@v1 + - uses: BSFishy/pip-action@v1 + with: + packages: | + json-schema-for-humans + + - name: Check if JSON schema and generated doc is up to date + run: | + EXPECTED_DIFF="" + NOT_UPDATED_MSG="JSON Schema is not up to date, run 'make config-doc-gen' before creating the PR" + + echo "Checking if JSON schema is up to date..." + make GENERATE_DOC_PATH=/tmp/ config-doc-gen + for CHECK_FILE in "node-config-schema.json" "node-config-doc.md" "node-config-doc.html" "custom_network-config-schema.json" "custom_network-config-doc.md" "custom_network-config-doc.html"; do + EXPECTED_FILE=tmp/$CHECK_FILE + REAL_FILE=docs/config-file/$CHECK_FILE + echo "checking $CHECK_FILE ...." + diff /tmp/$CHECK_FILE docs/config-file/$CHECK_FILE + if [ $? -ne 0 ]; then + echo " FAILED file $CHECK_FILE!" 
+ exit 1 + fi + echo "checked $CHECK_FILE OK" + done + + echo "Everything up to date" + diff --git a/.gitignore b/.gitignore index f58fd80ca0..4907cd4699 100644 --- a/.gitignore +++ b/.gitignore @@ -16,4 +16,6 @@ .env out.dat -cmd/__debug_bin \ No newline at end of file +cmd/__debug_bin + +.venv \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 8078d867bc..8719b468b5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,8 +12,9 @@ RUN cd /src/db && packr2 RUN cd /src && make build # CONTAINER FOR RUNNING BINARY -FROM alpine:3.16.0 +FROM alpine:3.18.0 COPY --from=build /src/dist/zkevm-node /app/zkevm-node COPY --from=build /src/config/environments/testnet/testnet.node.config.toml /app/example.config.toml +RUN apk update && apk add postgresql15-client EXPOSE 8123 CMD ["/bin/sh", "-c", "/app/zkevm-node run"] diff --git a/Makefile b/Makefile index d31bad06c0..43a49ee882 100644 --- a/Makefile +++ b/Makefile @@ -20,6 +20,15 @@ LDFLAGS += -X 'github.com/0xPolygonHermez/zkevm-node.GitRev=$(GITREV)' LDFLAGS += -X 'github.com/0xPolygonHermez/zkevm-node.GitBranch=$(GITBRANCH)' LDFLAGS += -X 'github.com/0xPolygonHermez/zkevm-node.BuildDate=$(DATE)' +# Variables +VENV = .venv +VENV_PYTHON = $(VENV)/bin/python +SYSTEM_PYTHON = $(or $(shell which python3), $(shell which python)) +PYTHON = $(or $(wildcard $(VENV_PYTHON)), "install_first_venv") +GENERATE_SCHEMA_DOC = $(VENV)/bin/generate-schema-doc +GENERATE_DOC_PATH= "docs/config-file/" +GENERATE_DOC_TEMPLATES_PATH= "docs/config-file/templates/" + .PHONY: build build: ## Builds the binary locally into ./dist $(GOENVVARS) go build -ldflags "all=$(LDFLAGS)" -o $(GOBIN)/$(GOBINARY) $(GOCMD) @@ -54,6 +63,52 @@ install-linter: ## Installs the linter lint: ## Runs the linter export "GOROOT=$$(go env GOROOT)" && $$(go env GOPATH)/bin/golangci-lint run + +$(VENV_PYTHON): + rm -rf $(VENV) + $(SYSTEM_PYTHON) -m venv $(VENV) + +venv: $(VENV_PYTHON) + +# 
https://stackoverflow.com/questions/24736146/how-to-use-virtualenv-in-makefile +.PHONY: install-config-doc-gen +$(GENERATE_SCHEMA_DOC): $(VENV_PYTHON) + $(PYTHON) -m pip install --upgrade pip + $(PYTHON) -m pip install json-schema-for-humans + +PHONY: config-doc-gen +config-doc-gen: config-doc-node config-doc-custom_network ## Generate config file's json-schema for node and custom_network and documentation + # + +.PHONY: config-doc-node +config-doc-node: $(GENERATE_SCHEMA_DOC) ## Generate config file's json-schema for node and documentation + go run ./cmd generate-json-schema --config-file=node --output=$(GENERATE_DOC_PATH)node-config-schema.json + $(GENERATE_SCHEMA_DOC) --config show_breadcrumbs=true \ + --config footer_show_time=false \ + --config expand_buttons=true \ + --config custom_template_path=$(GENERATE_DOC_TEMPLATES_PATH)/js/base.html \ + $(GENERATE_DOC_PATH)node-config-schema.json \ + $(GENERATE_DOC_PATH)node-config-doc.html + $(GENERATE_SCHEMA_DOC) --config custom_template_path=$(GENERATE_DOC_TEMPLATES_PATH)/md/base.md \ + --config footer_show_time=false \ + $(GENERATE_DOC_PATH)node-config-schema.json \ + $(GENERATE_DOC_PATH)node-config-doc.md + +.PHONY: config-doc-custom_network +config-doc-custom_network: $(GENERATE_SCHEMA_DOC) ## Generate config file's json-schema for custom_network and documentation + go run ./cmd generate-json-schema --config-file=custom_network --output=$(GENERATE_DOC_PATH)custom_network-config-schema.json + $(GENERATE_SCHEMA_DOC) --config show_breadcrumbs=true --config footer_show_time=false \ + --config expand_buttons=true \ + --config custom_template_path=$(GENERATE_DOC_TEMPLATES_PATH)/js/base.html \ + $(GENERATE_DOC_PATH)custom_network-config-schema.json \ + $(GENERATE_DOC_PATH)custom_network-config-doc.html + $(GENERATE_SCHEMA_DOC) --config custom_template_path=$(GENERATE_DOC_TEMPLATES_PATH)/md/base.md \ + --config footer_show_time=false \ + --config example_format=JSON \ + 
$(GENERATE_DOC_PATH)custom_network-config-schema.json \ + $(GENERATE_DOC_PATH)custom_network-config-doc.md + + .PHONY: update-external-dependencies update-external-dependencies: ## Updates external dependencies like images, test vectors or proto files go run ./scripts/cmd/... updatedeps @@ -65,7 +120,7 @@ install-git-hooks: ## Moves hook files to the .git/hooks directory .PHONY: generate-code-from-proto generate-code-from-proto: ## Generates code from proto files cd proto/src/proto/hashdb/v1 && protoc --proto_path=. --proto_path=../../../../include --go_out=../../../../../merkletree/pb --go-grpc_out=../../../../../merkletree/pb --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative hashdb.proto - cd proto/src/proto/executor/v1 && protoc --proto_path=. --go_out=../../../../../state/runtime/executor/pb --go-grpc_out=../../../../../state/runtime/executor/pb --go-grpc_opt=paths=source_relative --go_opt=paths=source_relative executor.proto + cd proto/src/proto/executor/v1 && protoc --proto_path=. --go_out=../../../../../state/runtime/executor --go-grpc_out=../../../../../state/runtime/executor --go-grpc_opt=paths=source_relative --go_opt=paths=source_relative executor.proto cd proto/src/proto/aggregator/v1 && protoc --proto_path=. --proto_path=../../../../include --go_out=../../../../../aggregator/pb --go-grpc_out=../../../../../aggregator/pb --go-grpc_opt=paths=source_relative --go_opt=paths=source_relative aggregator.proto ## Help display. 
diff --git a/cmd/jsonschema.go b/cmd/jsonschema.go new file mode 100644 index 0000000000..42c6402bd5 --- /dev/null +++ b/cmd/jsonschema.go @@ -0,0 +1,25 @@ +package main + +import ( + "github.com/0xPolygonHermez/zkevm-node/config" + "github.com/urfave/cli/v2" +) + +func genJSONSchema(cli *cli.Context) error { + file_config := cli.String(config.FlagDocumentationFileType) + output := cli.String(config.FlagOutputFile) + switch file_config { + case NODE_CONFIGFILE: + { + generator := config.NewNodeConfigJsonSchemaGenerater() + return generator.GenerateJsonSchemaAndWriteToFile(cli, output) + } + case NETWORK_CONFIGFILE: + { + generator := config.NewNetworkConfigJsonSchemaGenerater() + return generator.GenerateJsonSchemaAndWriteToFile(cli, output) + } + default: + panic("Not supported this config file: " + file_config) + } +} diff --git a/cmd/main.go b/cmd/main.go index 631bcd9e21..7086e8994a 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -30,6 +30,13 @@ const ( SEQUENCE_SENDER = "sequence-sender" ) +const ( + // NODE_CONFIGFILE name to identify the node config-file + NODE_CONFIGFILE = "node" + // NETWORK_CONFIGFILE name to identify the netowk_custom (genesis) config-file + NETWORK_CONFIGFILE = "custom_network" +) + var ( configFileFlag = cli.StringFlag{ Name: config.FlagCfg, @@ -75,6 +82,16 @@ var ( Usage: "Blocks the migrations in stateDB to not run them", Required: false, } + outputFileFlag = cli.StringFlag{ + Name: config.FlagOutputFile, + Usage: "Indicate the output file", + Required: true, + } + documentationFileTypeFlag = cli.StringFlag{ + Name: config.FlagDocumentationFileType, + Usage: fmt.Sprintf("Indicate the type of file to generate json-schema: %v,%v ", NODE_CONFIGFILE, NETWORK_CONFIGFILE), + Required: true, + } ) func main() { @@ -149,6 +166,12 @@ func main() { Action: dumpState, Flags: dumpStateFlags, }, + { + Name: "generate-json-schema", + Usage: "Generate the json-schema for the configuration file, and store it on docs/schema.json", + Action: 
genJSONSchema, + Flags: []cli.Flag{&outputFileFlag, &documentationFileTypeFlag}, + }, { Name: "snapshot", Aliases: []string{"snap"}, diff --git a/cmd/run.go b/cmd/run.go index b102e3f0aa..f85f3c1f52 100644 --- a/cmd/run.go +++ b/cmd/run.go @@ -32,7 +32,6 @@ import ( "github.com/0xPolygonHermez/zkevm-node/sequencesender" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" - executorpb "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" "github.com/0xPolygonHermez/zkevm-node/synchronizer" "github.com/jackc/pgx/v4/pgxpool" "github.com/prometheus/client_golang/prometheus/promhttp" @@ -126,11 +125,9 @@ func start(cliCtx *cli.Context) error { } currentForkID := forkIDIntervals[len(forkIDIntervals)-1].ForkId - log.Infof("Fork ID read from POE SC = %v", currentForkID) + log.Infof("Fork ID read from POE SC = %v", forkIDIntervals[len(forkIDIntervals)-1].ForkId) c.Aggregator.ChainID = l2ChainID c.Aggregator.ForkId = currentForkID - c.Sequencer.DBManager.ForkID = currentForkID - c.Sequencer.Finalizer.ForkID = currentForkID log.Infof("Chain ID read from POE SC = %v", l2ChainID) ctx := context.Background() @@ -218,7 +215,7 @@ func start(cliCtx *cli.Context) error { if poolInstance == nil { poolInstance = createPool(c.Pool, l2ChainID, st, eventLog) } - go runSynchronizer(*c, etherman, etm, st, poolInstance) + go runSynchronizer(*c, etherman, etm, st, poolInstance, eventLog) case ETHTXMANAGER: ev.Component = event.Component_EthTxManager ev.Description = "Running eth tx manager service" @@ -286,7 +283,7 @@ func newEtherman(c config.Config) (*etherman.Client, error) { return etherman, nil } -func runSynchronizer(cfg config.Config, etherman *etherman.Client, ethTxManager *ethtxmanager.Client, st *state.State, pool *pool.Pool) { +func runSynchronizer(cfg config.Config, etherman *etherman.Client, ethTxManager *ethtxmanager.Client, st *state.State, pool *pool.Pool, eventLog *event.EventLog) { var trustedSequencerURL 
string var err error if !cfg.IsTrustedSequencer { @@ -305,7 +302,7 @@ func runSynchronizer(cfg config.Config, etherman *etherman.Client, ethTxManager sy, err := synchronizer.NewSynchronizer( cfg.IsTrustedSequencer, etherman, st, pool, ethTxManager, - zkEVMClient, cfg.NetworkConfig.Genesis, cfg.Synchronizer, + zkEVMClient, eventLog, cfg.NetworkConfig.Genesis, cfg.Synchronizer, ) if err != nil { log.Fatal(err) @@ -457,7 +454,7 @@ func newState(ctx context.Context, c *config.Config, l2ChainID uint64, forkIDInt stateDb := state.NewPostgresStorage(sqlDB) // Executor - var executorClient executorpb.ExecutorServiceClient + var executorClient executor.ExecutorServiceClient if needsExecutor { executorClient, _, _ = executor.NewExecutorClient(ctx, c.Executor) } diff --git a/config/config.go b/config/config.go index 65cf2009d7..98f28fc48d 100644 --- a/config/config.go +++ b/config/config.go @@ -48,32 +48,74 @@ const ( FlagPassword = "password" // FlagMigrations is the flag for migrations. FlagMigrations = "migrations" + // FlagOutputFile is the flag for the output file + FlagOutputFile = "output" // FlagMaxAmount is the flag to avoid to use the flag FlagAmount FlagMaxAmount = "max-amount" + // FlagDocumentationFileType is the flag for the choose which file generate json-schema + FlagDocumentationFileType = "config-file" ) -// Config represents the configuration of the entire Hermez Node +/* +Config represents the configuration of the entire Hermez Node +The file is [TOML format] +You could find some examples: + - `config/environments/local/local.node.config.toml`: running a permisionless node + - `config/environments/mainnet/public.node.config.toml` + - `config/environments/public/public.node.config.toml` + - `test/config/test.node.config.toml`: configuration for a trusted node used in CI + +[TOML format]: https://en.wikipedia.org/wiki/TOML +*/ type Config struct { - IsTrustedSequencer bool `mapstructure:"IsTrustedSequencer"` + // This define is a trusted node (`true`) or a 
permission less (`false`). If you don't known + // set to `false` + IsTrustedSequencer bool `mapstructure:"IsTrustedSequencer"` + // Last batch number before a forkid change (fork upgrade). That implies that + // greater batch numbers are going to be trusted but no virtualized neither verified. + // So after the batch number `ForkUpgradeBatchNumber` is virtualized and verified you could update + // the system (SC,...) to new forkId and remove this value to allow the system to keep + // Virtualizing and verifying the new batchs. + // Check issue [#2236](https://github.com/0xPolygonHermez/zkevm-node/issues/2236) to known more + // This value overwrite `SequenceSender.ForkUpgradeBatchNumber` ForkUpgradeBatchNumber uint64 `mapstructure:"ForkUpgradeBatchNumber"` - ForkUpgradeNewForkId uint64 `mapstructure:"ForkUpgradeNewForkId"` - Log log.Config - Etherman etherman.Config - EthTxManager ethtxmanager.Config - Pool pool.Config - RPC jsonrpc.Config - Synchronizer synchronizer.Config - Sequencer sequencer.Config - SequenceSender sequencesender.Config - Aggregator aggregator.Config - NetworkConfig NetworkConfig - L2GasPriceSuggester gasprice.Config - Executor executor.Config - MTClient merkletree.Config - StateDB db.Config - Metrics metrics.Config - EventLog event.Config - HashDB db.Config + // Which is the new forkId + ForkUpgradeNewForkId uint64 `mapstructure:"ForkUpgradeNewForkId"` + // Configure Log level for all the services, allow also to store the logs in a file + Log log.Config + // Configuration of the etherman (client for access L1) + Etherman etherman.Config + // Configuration for ethereum transaction manager + EthTxManager ethtxmanager.Config + // Pool service configuration + Pool pool.Config + // Configuration for RPC service. THis one offers a extended Ethereum JSON-RPC API interface to interact with the node + RPC jsonrpc.Config + // Configuration of service `Syncrhonizer`. 
For this service is also really important the value of `IsTrustedSequencer` + // because depending of this values is going to ask to a trusted node for trusted transactions or not + Synchronizer synchronizer.Config + // Configuration of the sequencer service + Sequencer sequencer.Config + // Configuration of the sequence sender service + SequenceSender sequencesender.Config + // Configuration of the aggregator service + Aggregator aggregator.Config + // Configuration of the genesis of the network. This is used to known the initial state of the network + NetworkConfig NetworkConfig + // Configuration of the gas price suggester service + L2GasPriceSuggester gasprice.Config + // Configuration of the executor service + Executor executor.Config + // Configuration of the merkle tree client service. Not use in the node, only for testing + MTClient merkletree.Config + // Configuration of the state database connection + StateDB db.Config + // Configuration of the metrics service, basically is where is going to publish the metrics + Metrics metrics.Config + // Configuration of the event database connection + EventLog event.Config + // Configuration of the hash database connection + HashDB db.Config } // Default parses the default configuration values. 
diff --git a/config/config_test.go b/config/config_test.go index 9e1dd99d26..269a2e19ba 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -172,6 +172,10 @@ func Test_Defaults(t *testing.T) { path: "Sequencer.Finalizer.ForcedBatchesFinalityNumberOfBlocks", expectedValue: uint64(64), }, + { + path: "Sequencer.Finalizer.StopSequencerOnBatchNum", + expectedValue: uint64(0), + }, { path: "Sequencer.Finalizer.TimestampResolution", expectedValue: types.NewDuration(10 * time.Second), @@ -180,6 +184,18 @@ func Test_Defaults(t *testing.T) { path: "Sequencer.EffectiveGasPrice.MaxBreakEvenGasPriceDeviationPercentage", expectedValue: uint64(10), }, + { + path: "Sequencer.EffectiveGasPrice.L1GasPriceFactor", + expectedValue: float64(0.25), + }, + { + path: "Sequencer.EffectiveGasPrice.ByteGasCost", + expectedValue: uint64(16), + }, + { + path: "Sequencer.EffectiveGasPrice.MarginFactor", + expectedValue: float64(1), + }, { path: "Sequencer.EffectiveGasPrice.Enabled", expectedValue: false, @@ -252,6 +268,10 @@ func Test_Defaults(t *testing.T) { path: "L2GasPriceSuggester.DefaultGasPriceWei", expectedValue: uint64(2000000000), }, + { + path: "L2GasPriceSuggester.MaxGasPriceWei", + expectedValue: uint64(0), + }, { path: "MTClient.URI", expectedValue: "zkevm-prover:50061", @@ -345,18 +365,6 @@ func Test_Defaults(t *testing.T) { path: "Pool.DB.MaxConns", expectedValue: 200, }, - { - path: "Pool.EffectiveGasPrice.L1GasPriceFactor", - expectedValue: float64(0.25), - }, - { - path: "Pool.EffectiveGasPrice.ByteGasCost", - expectedValue: uint64(16), - }, - { - path: "Pool.EffectiveGasPrice.MarginFactor", - expectedValue: float64(1), - }, { path: "RPC.Host", expectedValue: "0.0.0.0", diff --git a/config/default.go b/config/default.go index 5b19a2df41..8d44065ab8 100644 --- a/config/default.go +++ b/config/default.go @@ -38,10 +38,6 @@ GlobalQueue = 1024 Port = "5432" EnableLog = false MaxConns = 200 - [Pool.EffectiveGasPrice] - L1GasPriceFactor = 0.25 - ByteGasCost = 16 - 
MarginFactor = 1 [Etherman] URL = "http://localhost:8545" @@ -109,6 +105,7 @@ MaxTxLifetime = "3h" ClosingSignalsManagerWaitForCheckingForcedBatches = "10s" ForcedBatchesFinalityNumberOfBlocks = 64 TimestampResolution = "10s" + StopSequencerOnBatchNum = 0 [Sequencer.DBManager] PoolRetrievalInterval = "500ms" L2ReorgRetrievalInterval = "5s" @@ -116,6 +113,9 @@ MaxTxLifetime = "3h" ResourceCostMultiplier = 1000 [Sequencer.EffectiveGasPrice] MaxBreakEvenGasPriceDeviationPercentage = 10 + L1GasPriceFactor = 0.25 + ByteGasCost = 16 + MarginFactor = 1 Enabled = false [SequenceSender] @@ -141,6 +141,7 @@ Type = "follower" UpdatePeriod = "10s" Factor = 0.15 DefaultGasPriceWei = 2000000000 +MaxGasPriceWei = 0 CleanHistoryPeriod = "1h" CleanHistoryTimeRetention = "5m" diff --git a/config/environments/local/local.node.config.toml b/config/environments/local/local.node.config.toml index 508dcf2df4..cc03b03701 100644 --- a/config/environments/local/local.node.config.toml +++ b/config/environments/local/local.node.config.toml @@ -1,3 +1,4 @@ + IsTrustedSequencer = false [Log] @@ -30,10 +31,6 @@ PollMinAllowedGasPriceInterval = "15s" Port = "5432" EnableLog = false MaxConns = 200 - [Pool.EffectiveGasPrice] - L1GasPriceFactor = 0.25 - ByteGasCost = 16 - MarginFactor = 1 [Etherman] URL = "http://your.L1node.url" @@ -94,6 +91,7 @@ MaxTxLifetime = "3h" ClosingSignalsManagerWaitForCheckingForcedBatches = "10s" ForcedBatchesFinalityNumberOfBlocks = 64 TimestampResolution = "10s" + StopSequencerOnBatchNum = 0 [Sequencer.DBManager] PoolRetrievalInterval = "500ms" L2ReorgRetrievalInterval = "5s" @@ -101,6 +99,9 @@ MaxTxLifetime = "3h" ResourceCostMultiplier = 1000 [Sequencer.EffectiveGasPrice] MaxBreakEvenGasPriceDeviationPercentage = 10 + L1GasPriceFactor = 0.25 + ByteGasCost = 16 + MarginFactor = 1 Enabled = false [SequenceSender] @@ -132,6 +133,7 @@ PrivateKeys = [ [L2GasPriceSuggester] Type = "default" DefaultGasPriceWei = 1000000000 +MaxGasPriceWei = 0 [MTClient] URI = 
"zkevm-prover:50061" diff --git a/config/environments/mainnet/public.prover.config.json b/config/environments/mainnet/public.prover.config.json index 4b472f6636..8fcc181715 100644 --- a/config/environments/mainnet/public.prover.config.json +++ b/config/environments/mainnet/public.prover.config.json @@ -109,5 +109,8 @@ "requestsPersistence": 3600, "maxExecutorThreads": 20, "maxProverThreads": 8, - "maxHashDBThreads": 8 -} \ No newline at end of file + "maxHashDBThreads": 8, + "ECRecoverPrecalc": true, + "ECRecoverPrecalcNThreads": 16, + "dbMultiWriteSinglePosition": true +} diff --git a/config/environments/testnet/testnet.prover.config.json b/config/environments/testnet/testnet.prover.config.json index 4b472f6636..8fcc181715 100644 --- a/config/environments/testnet/testnet.prover.config.json +++ b/config/environments/testnet/testnet.prover.config.json @@ -109,5 +109,8 @@ "requestsPersistence": 3600, "maxExecutorThreads": 20, "maxProverThreads": 8, - "maxHashDBThreads": 8 -} \ No newline at end of file + "maxHashDBThreads": 8, + "ECRecoverPrecalc": true, + "ECRecoverPrecalcNThreads": 16, + "dbMultiWriteSinglePosition": true +} diff --git a/config/gen_json_schema.go b/config/gen_json_schema.go new file mode 100644 index 0000000000..a57a20c002 --- /dev/null +++ b/config/gen_json_schema.go @@ -0,0 +1,249 @@ +package config + +import ( + "encoding/json" + "os" + "reflect" + + "github.com/0xPolygonHermez/zkevm-node/config/types" + "github.com/invopop/jsonschema" + "github.com/urfave/cli/v2" +) + +// ConfigJsonSchemaGenerater are the parameters to generate a json-schema based on the T struct +// The parametrization of the function are used for unittest +type ConfigJsonSchemaGenerater[T any] struct { + repoName string + repoNameSuffix string + // It force to remove any required field in json-schema + cleanRequiredField bool + // It read the comments in the code and add as description in schema + addCodeCommentsToSchema bool + // Check if there are mapstructure that renames 
the fields + checkNoMapStructureIsRenamingFields bool + // source directories for extract comments + pathSourceCode string + // Struct with the default values to set + defaultValues *T + // NetworkConfig is read from Genesis file, so make sense to remove + // from general config file + removeNetworkConfig bool +} + +// NewNodeConfigJsonSchemaGenerater returns a new class for generating json-schema of the node config file (.toml) +func NewNodeConfigJsonSchemaGenerater() ConfigJsonSchemaGenerater[Config] { + res := ConfigJsonSchemaGenerater[Config]{} + res.repoName = "github.com/0xPolygonHermez/zkevm-node" + res.repoNameSuffix = "/config/config" + res.addCodeCommentsToSchema = true + res.pathSourceCode = "./" + res.cleanRequiredField = true + res.checkNoMapStructureIsRenamingFields = true + config_default_values, err := Default() + res.defaultValues = config_default_values + if err != nil { + panic("can't create default values for config file") + } + return res +} + +// NewNetworkConfigJsonSchemaGenerater returns a new class for generating json-schema of the network-custom config file (.json) +func NewNetworkConfigJsonSchemaGenerater() ConfigJsonSchemaGenerater[GenesisFromJSON] { + res := ConfigJsonSchemaGenerater[GenesisFromJSON]{} + res.repoName = "github.com/0xPolygonHermez/zkevm-node" + res.repoNameSuffix = "/config/config" + res.addCodeCommentsToSchema = true + res.pathSourceCode = "./" + res.cleanRequiredField = true + res.checkNoMapStructureIsRenamingFields = false + res.defaultValues = nil + return res +} + +// GenerateJsonSchema launchs the generation, and returns the schema +func (s ConfigJsonSchemaGenerater[T]) GenerateJsonSchema(cli *cli.Context) (*jsonschema.Schema, error) { + if s.checkNoMapStructureIsRenamingFields { + checkNoMapStructureIsRenamingFields(s.defaultValues) + } + + r := new(jsonschema.Reflector) + repoName := s.repoName + r.Namer = func(rt reflect.Type) string { + return rt.PkgPath() + "_" + rt.Name() + } + r.ExpandedStruct = true + 
r.DoNotReference = true + if s.addCodeCommentsToSchema { + if err := r.AddGoComments(repoName, s.pathSourceCode); err != nil { + return nil, err + } + } + + schema := r.Reflect(s.defaultValues) + schema.ID = jsonschema.ID(s.repoName + s.repoNameSuffix) + + if s.cleanRequiredField { + cleanRequiredFields(schema) + } + + if s.defaultValues != nil { + fillDefaultValues(schema, s.defaultValues) + } + + if s.removeNetworkConfig { + schema.Properties.Delete("NetworkConfig") + } + + return schema, nil +} + +// SerializeJsonSchema serializes the schema in JSON to be stored +func (s ConfigJsonSchemaGenerater[T]) SerializeJsonSchema(schema *jsonschema.Schema) ([]byte, error) { + file, err := json.MarshalIndent(schema, "", "\t") + if err != nil { + return nil, err + } + return file, nil +} + +// GenerateJsonSchemaAndWriteToFile generate the schema and store in `output_filename` file +func (s ConfigJsonSchemaGenerater[T]) GenerateJsonSchemaAndWriteToFile(cli *cli.Context, output_filename string) error { + schema, err := s.GenerateJsonSchema(cli) + if err != nil { + return err + } + file, err := s.SerializeJsonSchema(schema) + if err != nil { + return err + } + + err = os.WriteFile(output_filename, file, 0600) //nolint:gomnd + if err != nil { + return err + } + return nil +} + +// The tag `magstructure` is not supported by `jsonschema` module +// so, if you try to rename a field using that the documentation is going to incosistent +// For that reason is a good practice to check that is not present this situation in +// the config files +func checkNoMapStructureIsRenamingFields(data interface{}) { + var reflected reflect.Value + if reflect.ValueOf(data).Kind() == reflect.Ptr { + reflected = reflect.ValueOf(data).Elem() + } else { + reflected = reflect.ValueOf(data) + } + + for i := 0; i < reflected.NumField(); i++ { + field := reflected.Type().Field(i) + tag := field.Tag.Get("mapstructure") + + if len(tag) > 0 && tag != field.Name { + panic("field [" + field.Name + "] is renamed 
using mapstructure to [" + tag + "]! that is not supported") + } + if field.Type.Kind() == reflect.Struct { + checkNoMapStructureIsRenamingFields(reflected.FieldByName(field.Name).Interface()) + } + } +} + +func variantFieldIsSet(field *interface{}) bool { + value := reflect.ValueOf(field) + if value.Kind() == reflect.Ptr && value.IsNil() { + return false + } else { + return true + } +} + +func fillDefaultValues(schema *jsonschema.Schema, default_config interface{}) { + fillDefaultValuesPartial(schema, default_config) +} + +func getFieldNameFromTag(data reflect.Value, key string, tagName string) (reflect.Value, bool) { + v := data + t := v.Type() + + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + tag := field.Tag.Get(tagName) + + if tag == key { + return v.Field(i), true + } + } + + return reflect.Value{}, false +} + +func getValueFromStruct(default_config interface{}, key string) reflect.Value { + var default_object reflect.Value + if reflect.ValueOf(default_config).Kind() == reflect.Ptr { + default_object = reflect.ValueOf(default_config).Elem() + } else { + default_object = reflect.ValueOf(default_config) + } + default_value := default_object.FieldByName(key) + if !default_value.IsValid() { + mappedFieldName, found := getFieldNameFromTag(default_object, key, "json") + if found { + default_value = mappedFieldName + } + } + return default_value +} + +func fillDefaultValuesPartial(schema *jsonschema.Schema, default_config interface{}) { + if schema.Properties == nil { + return + } + for _, key := range schema.Properties.Keys() { + value, ok := schema.Properties.Get(key) + if ok { + value_schema, _ := value.(*jsonschema.Schema) + default_value := getValueFromStruct(default_config, key) + if default_value.IsValid() && variantFieldIsSet(&value_schema.Default) { + switch value_schema.Type { + case "array": + if !default_value.IsZero() && !default_value.IsNil() { + def_value := default_value.Interface() + value_schema.Default = def_value + } + case 
"object": + fillDefaultValuesPartial(value_schema, default_value.Interface()) + default: // string, number, integer, boolean + if default_value.Type() == reflect.TypeOf(types.Duration{}) { + duration, ok := default_value.Interface().(types.Duration) + if ok { + value_schema.Default = duration.String() + } + } else { + value_schema.Default = default_value.Interface() + } + } + } + } + } +} + +func cleanRequiredFields(schema *jsonschema.Schema) { + schema.Required = []string{} + if schema.Properties == nil { + return + } + for _, key := range schema.Properties.Keys() { + value, ok := schema.Properties.Get(key) + if ok { + value_schema, _ := value.(*jsonschema.Schema) + value_schema.Required = []string{} + switch value_schema.Type { + case "object": + cleanRequiredFields(value_schema) + case "array": + cleanRequiredFields(value_schema.Items) + } + } + } +} diff --git a/config/gen_json_schema_test.go b/config/gen_json_schema_test.go new file mode 100644 index 0000000000..f43724c084 --- /dev/null +++ b/config/gen_json_schema_test.go @@ -0,0 +1,354 @@ +package config + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "testing" + "time" + + "github.com/0xPolygonHermez/zkevm-node/config/types" + "github.com/invopop/jsonschema" + "github.com/mitchellh/mapstructure" + "github.com/spf13/viper" + "github.com/stretchr/testify/require" + "github.com/urfave/cli/v2" +) + +type MySectionConfig struct { +} + +/* +Config represents the configuration of the entire Hermez Node +The file is [TOML format](https://en.wikipedia.org/wiki/TOML#) + +You could find some examples: +- `config/environments/local/local.node.config.toml`: running a permisionless node +- `config/environments/mainnet/public.node.config.toml` +- `config/environments/public/public.node.config.toml` +- `test/config/test.node.config.toml`: configuration for a trusted node used in CI +*/ +type MyTestConfig struct { + // F1 field description + F1 string + // F2 field description + F2 int +} + +type 
MyTestConfigWithJsonRenaming struct { + F1 string `json:"f1_another_name"` + F2 int `json:"f2_another_name"` +} + +type MyTestConfigWithMapstructureRenaming struct { + F1 string `mapstructure:"f1_another_name"` + F2 int `mapstructure:"f2_another_name"` +} + +type ExapmleTestWithSimpleArrays struct { + F1 string + // Example of array + Outputs []string +} + +type MyTestConfigWithMapstructureRenamingInSubStruct struct { + F1 string + F2 int + F3 MyTestConfigWithMapstructureRenaming +} +type KeystoreFileConfigExample struct { + // Path is the file path for the key store file + Path string + + // Password is the password to decrypt the key store file + Password string +} + +type ConfigWithDurationAndAComplexArray struct { + // FrequencyToMonitorTxs frequency of the resending failed txs + FrequencyToMonitorTxs types.Duration + + // PrivateKeys defines all the key store files that are going + // to be read in order to provide the private keys to sign the L1 txs + PrivateKeys []KeystoreFileConfigExample +} + +func checkDefaultValue(t *testing.T, schema *jsonschema.Schema, key []string, expectedValue interface{}) { + v, err := getValueFromSchema(schema, key) + require.NoError(t, err) + require.EqualValues(t, expectedValue, v.Default) +} + +const MyTestConfigTomlFile = ` +f1_another_name="value_f1" +f2_another_name=5678 +` + +func TestGenerateJsonSchemaCommentsWithDurationItem(t *testing.T) { + cli := cli.NewContext(nil, nil, nil) + duration, err := time.ParseDuration("1m") + require.NoError(t, err) + generator := ConfigJsonSchemaGenerater[ConfigWithDurationAndAComplexArray]{ + repoName: "github.com/0xPolygonHermez/zkevm-node/config/", + cleanRequiredField: true, + addCodeCommentsToSchema: true, + pathSourceCode: "./", + repoNameSuffix: "config/", + defaultValues: &ConfigWithDurationAndAComplexArray{ + FrequencyToMonitorTxs: types.NewDuration(duration), + }, + } + schema, err := generator.GenerateJsonSchema(cli) + require.NoError(t, err) + require.NotNil(t, schema) + v, err 
:= getValueFromSchema(schema, []string{"FrequencyToMonitorTxs"}) + require.NoError(t, err) + require.EqualValues(t, "1m0s", v.Default) + require.NotEmpty(t, v.Description) +} + +func TestGenerateJsonSchemaCommentsWithComplexArrays(t *testing.T) { + cli := cli.NewContext(nil, nil, nil) + PrivateKeys := []KeystoreFileConfigExample{{Path: "/pk/sequencer.keystore", Password: "testonly"}} + generator := ConfigJsonSchemaGenerater[ConfigWithDurationAndAComplexArray]{ + repoName: "github.com/0xPolygonHermez/zkevm-node/config/", + cleanRequiredField: true, + addCodeCommentsToSchema: true, + pathSourceCode: "./", + repoNameSuffix: "config/", + defaultValues: &ConfigWithDurationAndAComplexArray{ + PrivateKeys: PrivateKeys, + }, + } + schema, err := generator.GenerateJsonSchema(cli) + require.NoError(t, err) + require.NotNil(t, schema) + v, err := getValueFromSchema(schema, []string{"PrivateKeys"}) + require.NoError(t, err) + require.EqualValues(t, PrivateKeys, v.Default) + require.NotEmpty(t, v.Description) + serialized, err := generator.SerializeJsonSchema(schema) + require.NoError(t, err) + var decoded interface{} + err = json.Unmarshal(serialized, &decoded) + require.NoError(t, err) + //def := decoded["properties"]["PrivateKeys"]["default"] + def := decoded.(map[string]interface{})["properties"].(map[string]interface{})["PrivateKeys"].(map[string]interface{})["default"] + s := fmt.Sprint(def) + require.EqualValues(t, "[map[Password:testonly Path:/pk/sequencer.keystore]]", s) +} + +func TestGenerateJsonSchemaCommentsWithArrays(t *testing.T) { + cli := cli.NewContext(nil, nil, nil) + generator := ConfigJsonSchemaGenerater[ExapmleTestWithSimpleArrays]{ + repoName: "github.com/0xPolygonHermez/zkevm-node/config/", + cleanRequiredField: true, + addCodeCommentsToSchema: true, + pathSourceCode: "./", + repoNameSuffix: "config/", + defaultValues: &ExapmleTestWithSimpleArrays{ + F1: "defaultf1", + Outputs: []string{"abc"}, + }, + } + schema, err := generator.GenerateJsonSchema(cli) 
+ require.NoError(t, err) + require.NotNil(t, schema) + v, err := getValueFromSchema(schema, []string{"Outputs"}) + require.NoError(t, err) + require.EqualValues(t, []string{"abc"}, v.Default) + require.NotEmpty(t, v.Description) +} + +func TestGenerateJsonSchemaCommentsWithMultiplesLines(t *testing.T) { + cli := cli.NewContext(nil, nil, nil) + generator := ConfigJsonSchemaGenerater[MyTestConfig]{ + repoName: "github.com/0xPolygonHermez/zkevm-node/config/", + cleanRequiredField: true, + addCodeCommentsToSchema: true, + pathSourceCode: "./", + repoNameSuffix: "config/", + defaultValues: &MyTestConfig{ + F1: "defaultf1", + F2: 1234, + }, + } + schema, err := generator.GenerateJsonSchema(cli) + require.NoError(t, err) + require.NotNil(t, schema) + v, err := getValueFromSchema(schema, []string{"F2"}) + require.NoError(t, err) + require.EqualValues(t, 1234, v.Default) + require.NotEmpty(t, v.Description) +} + +// This test is just to check what is the behaviour of reading a file +// when using tags `mapstructure` and `json` +func TestExploratoryForCheckReadFromFile(t *testing.T) { + t.Skip("Is not a real test, just an exploratory one") + viper.SetConfigType("toml") + err := viper.ReadConfig(bytes.NewBuffer([]byte(MyTestConfigTomlFile))) + require.NoError(t, err) + + var cfgJson MyTestConfigWithJsonRenaming + err = viper.Unmarshal(&cfgJson, viper.DecodeHook(mapstructure.TextUnmarshallerHookFunc())) + require.NoError(t, err) + + var cfgMapStructure MyTestConfigWithMapstructureRenaming + err = viper.Unmarshal(&cfgMapStructure, viper.DecodeHook(mapstructure.TextUnmarshallerHookFunc())) + require.NoError(t, err) + + require.EqualValues(t, cfgMapStructure.F1, cfgJson.F1) + require.EqualValues(t, cfgMapStructure.F2, cfgJson.F2) +} + +func TestGenerateJsonSchemaCustomWithNameChangingUsingMapsInSubFieldtrucutMustPanic(t *testing.T) { + cli := cli.NewContext(nil, nil, nil) + generator := ConfigJsonSchemaGenerater[MyTestConfigWithMapstructureRenamingInSubStruct]{ + repoName: 
"mytest", + cleanRequiredField: true, + addCodeCommentsToSchema: true, + pathSourceCode: "./", + checkNoMapStructureIsRenamingFields: true, + defaultValues: &MyTestConfigWithMapstructureRenamingInSubStruct{ + F1: "defaultf1", + F2: 1234, + }, + } + //https://gophersnippets.com/how-to-test-a-function-that-panics + t.Run("panics", func(t *testing.T) { + // If the function panics, recover() will + // return a non nil value. + defer func() { + if r := recover(); r == nil { + t.Errorf("function should panic") + } + }() + + _, err := generator.GenerateJsonSchema(cli) + require.NoError(t, err) + }) +} + +func TestGenerateJsonSchemaCustomWithNameChangingUsingMapstrucutMustPanic(t *testing.T) { + cli := cli.NewContext(nil, nil, nil) + generator := ConfigJsonSchemaGenerater[MyTestConfigWithMapstructureRenaming]{ + repoName: "mytest", + cleanRequiredField: true, + addCodeCommentsToSchema: true, + pathSourceCode: "./", + checkNoMapStructureIsRenamingFields: true, + defaultValues: &MyTestConfigWithMapstructureRenaming{ + F1: "defaultf1", + F2: 1234, + }, + } + //https://gophersnippets.com/how-to-test-a-function-that-panics + t.Run("panics", func(t *testing.T) { + // If the function panics, recover() will + // return a non nil value. 
+ defer func() { + if r := recover(); r == nil { + t.Errorf("function should panic") + } + }() + + _, err := generator.GenerateJsonSchema(cli) + require.NoError(t, err) + }) +} + +// This case is a field that is mapped with another name in the json file +func TestGenerateJsonSchemaCustomWithNameChangingSetDefault(t *testing.T) { + cli := cli.NewContext(nil, nil, nil) + generator := ConfigJsonSchemaGenerater[MyTestConfigWithJsonRenaming]{ + repoName: "mytest", + cleanRequiredField: true, + addCodeCommentsToSchema: true, + pathSourceCode: "./", + defaultValues: &MyTestConfigWithJsonRenaming{ + F1: "defaultf1", + F2: 1234, + }, + } + + schema, err := generator.GenerateJsonSchema(cli) + require.NoError(t, err) + require.NotNil(t, schema) + + checkDefaultValue(t, schema, []string{"f1_another_name"}, "defaultf1") + checkDefaultValue(t, schema, []string{"f2_another_name"}, 1234) +} + +func TestGenerateJsonSchemaCustomSetDefault(t *testing.T) { + cli := cli.NewContext(nil, nil, nil) + generator := ConfigJsonSchemaGenerater[MyTestConfig]{ + repoName: "mytest", + cleanRequiredField: true, + addCodeCommentsToSchema: true, + pathSourceCode: "./", + defaultValues: &MyTestConfig{ + F1: "defaultf1", + F2: 1234, + }, + } + + schema, err := generator.GenerateJsonSchema(cli) + require.NoError(t, err) + require.NotNil(t, schema) + checkDefaultValue(t, schema, []string{"F1"}, "defaultf1") +} + +func TestGenerateJsonSchemaInjectDefaultValue1stLevel(t *testing.T) { + cli := cli.NewContext(nil, nil, nil) + generator := NewNodeConfigJsonSchemaGenerater() + generator.pathSourceCode = "../" + generator.defaultValues.IsTrustedSequencer = false + schema, err := generator.GenerateJsonSchema(cli) + require.NoError(t, err) + require.NotNil(t, schema) + v, err := getValueFromSchema(schema, []string{"IsTrustedSequencer"}) + require.NoError(t, err) + require.EqualValues(t, false, v.Default) +} + +func TestGenerateJsonSchemaInjectDefaultValue2stLevel(t *testing.T) { + cli := cli.NewContext(nil, nil, 
nil) + generator := NewNodeConfigJsonSchemaGenerater() + generator.pathSourceCode = "../" + // This is a hack, we are not at root folder, then to store the comment is joining .. with reponame + // and doesn't find out the comment + generator.repoName = "github.com/0xPolygonHermez/zkevm-node/config/" + generator.repoNameSuffix = "/config" + generator.defaultValues.Log.Level = "mylevel" + schema, err := generator.GenerateJsonSchema(cli) + require.NoError(t, err) + require.NotNil(t, schema) + v, err := getValueFromSchema(schema, []string{"Log", "Level"}) + require.NoError(t, err) + require.EqualValues(t, "mylevel", v.Default) + require.NotEmpty(t, v.Description) +} + +func getValueFromSchema(schema *jsonschema.Schema, keys []string) (*jsonschema.Schema, error) { + if schema == nil { + return nil, errors.New("schema is null") + } + subschema := schema + + for _, key := range keys { + v, exist := subschema.Properties.Get(key) + + if !exist { + return nil, errors.New("key " + key + " doesnt exist in schema") + } + + new_schema, ok := v.(*jsonschema.Schema) + if !ok { + return nil, errors.New("fails conversion for key " + key + " doesnt exist in schema") + } + subschema = new_schema + } + return subschema, nil +} diff --git a/config/network.go b/config/network.go index 15ad97c0e7..57f1ac8488 100644 --- a/config/network.go +++ b/config/network.go @@ -17,11 +17,16 @@ import ( // NetworkConfig is the configuration struct for the different environments type NetworkConfig struct { - L1Config etherman.L1Config `json:"l1Config"` + // L1: Configuration related to L1 + L1Config etherman.L1Config `json:"l1Config"` + // DEPRECATED L2: address of the `PolygonZkEVMGlobalExitRootL2 proxy` smart contract L2GlobalExitRootManagerAddr common.Address - L2BridgeAddr common.Address - Genesis state.Genesis - MaxCumulativeGasUsed uint64 + // L2: address of the `PolygonZkEVMBridge proxy` smart contract + L2BridgeAddr common.Address + // L1: Genesis of the rollup, first block number and root + 
Genesis state.Genesis + // Removed beacause is not in use + //MaxCumulativeGasUsed uint64 } type network string @@ -30,20 +35,31 @@ const mainnet network = "mainnet" const testnet network = "testnet" const custom network = "custom" -type genesisFromJSON struct { - Root string `json:"root"` - GenesisBlockNum uint64 `json:"genesisBlockNumber"` - Genesis []genesisAccountFromJSON `json:"genesis"` - L1Config etherman.L1Config +// GenesisFromJSON is the config file for network_custom +type GenesisFromJSON struct { + // L1: root hash of the genesis block + Root string `json:"root"` + // L1: block number of the genesis block + GenesisBlockNum uint64 `json:"genesisBlockNumber"` + // L2: List of states contracts used to populate merkle tree at initial state + Genesis []genesisAccountFromJSON `json:"genesis"` + // L1: configuration of the network + L1Config etherman.L1Config } type genesisAccountFromJSON struct { - Balance string `json:"balance"` - Nonce string `json:"nonce"` - Address string `json:"address"` - Bytecode string `json:"bytecode"` - Storage map[string]string `json:"storage"` - ContractName string `json:"contractName"` + // Address of the account + Balance string `json:"balance"` + // Nonce of the account + Nonce string `json:"nonce"` + // Address of the contract + Address string `json:"address"` + // Byte code of the contract + Bytecode string `json:"bytecode"` + // Initial storage of the contract + Storage map[string]string `json:"storage"` + // Name of the contract in L1 (e.g. "PolygonZkEVMDeployer", "PolygonZkEVMBridge",...) 
+ ContractName string `json:"contractName"` } func (cfg *Config) loadNetworkConfig(ctx *cli.Context) { @@ -96,7 +112,7 @@ func loadGenesisFileAsString(ctx *cli.Context) (string, error) { func loadGenesisFromJSONString(jsonStr string) (NetworkConfig, error) { var cfg NetworkConfig - var cfgJSON genesisFromJSON + var cfgJSON GenesisFromJSON if err := json.Unmarshal([]byte(jsonStr), &cfgJSON); err != nil { return NetworkConfig{}, err } diff --git a/config/types/duration.go b/config/types/duration.go index dc9cedbc4f..c433bf7bea 100644 --- a/config/types/duration.go +++ b/config/types/duration.go @@ -1,6 +1,10 @@ package types -import "time" +import ( + "time" + + "github.com/invopop/jsonschema" +) // Duration is a wrapper type that parses time duration from text. type Duration struct { @@ -21,3 +25,16 @@ func (d *Duration) UnmarshalText(data []byte) error { func NewDuration(duration time.Duration) Duration { return Duration{time.Duration(duration)} } + +// JSONSchema returns a custom schema to be used for the JSON Schema generation of this type +func (Duration) JSONSchema() *jsonschema.Schema { + return &jsonschema.Schema{ + Type: "string", + Title: "Duration", + Description: "Duration expressed in units: [ns, us, ms, s, m, h, d]", + Examples: []interface{}{ + "1m", + "300ms", + }, + } +} diff --git a/db/config.go b/db/config.go index 5adc24f033..ad56155ffa 100644 --- a/db/config.go +++ b/db/config.go @@ -5,16 +5,16 @@ type Config struct { // Database name Name string `mapstructure:"Name"` - // User name + // Database User name User string `mapstructure:"User"` - // Password of the user + // Database Password of the user Password string `mapstructure:"Password"` - // Host address + // Host address of database Host string `mapstructure:"Host"` - // Port Number + // Port Number of database Port string `mapstructure:"Port"` // EnableLog diff --git a/db/migrations/state/0007.sql b/db/migrations/state/0007.sql index 9de2b315ab..c071fd6307 100644 --- 
a/db/migrations/state/0007.sql +++ b/db/migrations/state/0007.sql @@ -1,6 +1,6 @@ -- +migrate Up ALTER TABLE state.transaction - ADD COLUMN effective_percentage SMALLINT; + ADD COLUMN effective_percentage SMALLINT DEFAULT 255; ALTER TABLE state.receipt ADD COLUMN effective_gas_price BIGINT; diff --git a/docker-compose.yml b/docker-compose.yml index 4768f79fcc..27a64e0780 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -107,7 +107,7 @@ services: zkevm-prover: container_name: zkevm-prover restart: unless-stopped - image: hermeznetwork/zkevm-prover:v0.2.0-RC3 + image: hermeznetwork/zkevm-prover:v2.0.1 depends_on: zkevm-state-db: condition: service_healthy diff --git a/docs/config-file/custom_network-config-doc.html b/docs/config-file/custom_network-config-doc.html new file mode 100644 index 0000000000..76d3a20696 --- /dev/null +++ b/docs/config-file/custom_network-config-doc.html @@ -0,0 +1 @@ + Schema Docs

GenesisFromJSON is the config file for network_custom

Type: string

L1: root hash of the genesis block


Type: integer

L1: block number of the genesis block


Type: array of object

L2: List of states contracts used to populate merkle tree at initial state

Each item of this array must be:

Type: string

Type: string

Type: string

Type: string

Type: string


L1: configuration of the network
Type: integer

Chain ID of the L1 network


Type: array of integer

Address of the L1 contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

Address of the L1 Matic token Contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

Address of the L1 GlobalExitRootManager contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


\ No newline at end of file diff --git a/docs/config-file/custom_network-config-doc.md b/docs/config-file/custom_network-config-doc.md new file mode 100644 index 0000000000..872d8e0724 --- /dev/null +++ b/docs/config-file/custom_network-config-doc.md @@ -0,0 +1,121 @@ +# Schema Docs + +**Type:** : `object` +**Description:** GenesisFromJSON is the config file for network_custom + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| -------------------------------------------- | ------- | --------------- | ---------- | ---------- | --------------------------------------------------------------------------- | +| - [root](#root ) | No | string | No | - | L1: root hash of the genesis block | +| - [genesisBlockNumber](#genesisBlockNumber ) | No | integer | No | - | L1: block number of the genesis block | +| - [genesis](#genesis ) | No | array of object | No | - | L2: List of states contracts used to populate merkle tree at initial state | +| - [L1Config](#L1Config ) | No | object | No | - | L1: configuration of the network | + +## 1. `root` + +**Type:** : `string` +**Description:** L1: root hash of the genesis block + +## 2. `genesisBlockNumber` + +**Type:** : `integer` +**Description:** L1: block number of the genesis block + +## 3. `genesis` + +**Type:** : `array of object` +**Description:** L2: List of states contracts used to populate merkle tree at initial state + +| | Array restrictions | +| -------------------- | ------------------ | +| **Min items** | N/A | +| **Max items** | N/A | +| **Items unicity** | False | +| **Additional items** | False | +| **Tuple validation** | See below | + +| Each item of this array must be | Description | +| ------------------------------- | ----------- | +| [genesis items](#genesis_items) | - | + +### 3.1. 
[genesis.genesis items] + +**Type:** : `object` + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ---------------------------------------------- | ------- | ------ | ---------- | ---------- | ----------------- | +| - [balance](#genesis_items_balance ) | No | string | No | - | - | +| - [nonce](#genesis_items_nonce ) | No | string | No | - | - | +| - [address](#genesis_items_address ) | No | string | No | - | - | +| - [bytecode](#genesis_items_bytecode ) | No | string | No | - | - | +| - [storage](#genesis_items_storage ) | No | object | No | - | - | +| - [contractName](#genesis_items_contractName ) | No | string | No | - | - | + +#### 3.1.1. `genesis.genesis items.balance` + +**Type:** : `string` + +#### 3.1.2. `genesis.genesis items.nonce` + +**Type:** : `string` + +#### 3.1.3. `genesis.genesis items.address` + +**Type:** : `string` + +#### 3.1.4. `genesis.genesis items.bytecode` + +**Type:** : `string` + +#### 3.1.5. `[genesis.genesis items.storage]` + +**Type:** : `object` + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ---------------------------------------- | ------- | ------ | ---------- | ---------- | ----------------- | +| - [.*](#genesis_items_storage_pattern1 ) | Yes | string | No | - | - | + +##### 3.1.5.1. Pattern `genesis.genesis items.storage..*` +> All properties whose name matches the regular expression +```.*``` ([Test](https://regex101.com/?regex=.%2A)) +must respect the following conditions + +**Type:** : `string` + +#### 3.1.6. `genesis.genesis items.contractName` + +**Type:** : `string` + +## 4. 
`[L1Config]` + +**Type:** : `object` +**Description:** L1: configuration of the network + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ----------------------------------------------------------------------------------- | ------- | ---------------- | ---------- | ---------- | ------------------------------------------------ | +| - [chainId](#L1Config_chainId ) | No | integer | No | - | Chain ID of the L1 network | +| - [polygonZkEVMAddress](#L1Config_polygonZkEVMAddress ) | No | array of integer | No | - | Address of the L1 contract | +| - [maticTokenAddress](#L1Config_maticTokenAddress ) | No | array of integer | No | - | Address of the L1 Matic token Contract | +| - [polygonZkEVMGlobalExitRootAddress](#L1Config_polygonZkEVMGlobalExitRootAddress ) | No | array of integer | No | - | Address of the L1 GlobalExitRootManager contract | + +### 4.1. `L1Config.chainId` + +**Type:** : `integer` +**Description:** Chain ID of the L1 network + +### 4.2. `L1Config.polygonZkEVMAddress` + +**Type:** : `array of integer` +**Description:** Address of the L1 contract + +### 4.3. `L1Config.maticTokenAddress` + +**Type:** : `array of integer` +**Description:** Address of the L1 Matic token Contract + +### 4.4. 
`L1Config.polygonZkEVMGlobalExitRootAddress` + +**Type:** : `array of integer` +**Description:** Address of the L1 GlobalExitRootManager contract + +---------------------------------------------------------------------------------------------------------------------------- +Generated using [json-schema-for-humans](https://github.com/coveooss/json-schema-for-humans) diff --git a/docs/config-file/custom_network-config-schema.json b/docs/config-file/custom_network-config-schema.json new file mode 100644 index 0000000000..4f4d5e423e --- /dev/null +++ b/docs/config-file/custom_network-config-schema.json @@ -0,0 +1,88 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "github.com/0xPolygonHermez/zkevm-node/config/config", + "properties": { + "root": { + "type": "string", + "description": "L1: root hash of the genesis block" + }, + "genesisBlockNumber": { + "type": "integer", + "description": "L1: block number of the genesis block" + }, + "genesis": { + "items": { + "properties": { + "balance": { + "type": "string" + }, + "nonce": { + "type": "string" + }, + "address": { + "type": "string" + }, + "bytecode": { + "type": "string" + }, + "storage": { + "patternProperties": { + ".*": { + "type": "string" + } + }, + "type": "object" + }, + "contractName": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object" + }, + "type": "array", + "description": "L2: List of states contracts used to populate merkle tree at initial state" + }, + "L1Config": { + "properties": { + "chainId": { + "type": "integer", + "description": "Chain ID of the L1 network" + }, + "polygonZkEVMAddress": { + "items": { + "type": "integer" + }, + "type": "array", + "maxItems": 20, + "minItems": 20, + "description": "Address of the L1 contract" + }, + "maticTokenAddress": { + "items": { + "type": "integer" + }, + "type": "array", + "maxItems": 20, + "minItems": 20, + "description": "Address of the L1 Matic token Contract" + }, + 
"polygonZkEVMGlobalExitRootAddress": { + "items": { + "type": "integer" + }, + "type": "array", + "maxItems": 20, + "minItems": 20, + "description": "Address of the L1 GlobalExitRootManager contract" + } + }, + "additionalProperties": false, + "type": "object", + "description": "L1: configuration of the network" + } + }, + "additionalProperties": false, + "type": "object", + "description": "GenesisFromJSON is the config file for network_custom" +} \ No newline at end of file diff --git a/docs/config-file/node-config-doc.html b/docs/config-file/node-config-doc.html new file mode 100644 index 0000000000..04f2a688b7 --- /dev/null +++ b/docs/config-file/node-config-doc.html @@ -0,0 +1,67 @@ + Schema Docs

Config represents the configuration of the entire Hermez Node. The file is in TOML format. You can find some examples:

Default: falseType: boolean

This defines whether the node is a trusted node (true) or a permissionless one (false). If you don't know,
set it to false


Default: 0Type: integer

Last batch number before a forkid change (fork upgrade). That implies that
greater batch numbers are going to be trusted but neither virtualized nor verified.
So, after the batch number ForkUpgradeBatchNumber is virtualized and verified, you could update
the system (SC, ...) to the new forkId and remove this value to allow the system to keep
virtualizing and verifying the new batches.
Check issue #2236 to know more.
This value overwrites SequenceSender.ForkUpgradeBatchNumber


Default: 0Type: integer

Which is the new forkId


Configure Log level for all the services, allow also to store the logs in a file
Default: "development"Type: enum (of string)

Environment defining the log format ("production" or "development").
In development mode enables development mode (which makes DPanicLevel logs panic), uses a console encoder, writes to standard error, and disables sampling. Stacktraces are automatically included on logs of WarnLevel and above.
Check here

Must be one of:

  • "production"
  • "development"

Default: "info"Type: enum (of string)

Log level. The lower the value, the more logs are generated

Must be one of:

  • "debug"
  • "info"
  • "warn"
  • "error"
  • "dpanic"
  • "panic"
  • "fatal"

Default: ["stderr"]Type: array of string

Outputs

Each item of this array must be:


Configuration of the etherman (client for access L1)
Default: "http://localhost:8545"Type: string

URL is the URL of the Ethereum node for L1


Default: falseType: boolean

Allows the L1 gas price calculation to use multiple sources


Configuration for use Etherscan as used as gas provider, basically it needs the API-KEY
Default: ""Type: string

Need API key to use etherscan, if it's empty etherscan is not used


Default: ""Type: string

URL of the etherscan API. Overwritten with a hardcoded URL: "https://api.etherscan.io/api?module=gastracker&action=gasoracle&apikey="


Configuration for ethereum transaction manager
Default: "1s"Type: string

FrequencyToMonitorTxs frequency of the resending failed txs


Examples:

"1m"
+
"300ms"
+

Default: "2m0s"Type: string

WaitTxToBeMined time to wait after transaction was sent to the ethereum


Examples:

"1m"
+
"300ms"
+

Type: array of object

PrivateKeys defines all the key store files that are going
to be read in order to provide the private keys to sign the L1 txs

Each item of this array must be:

Type: string

Path is the file path for the key store file


Type: string

Password is the password to decrypt the key store file



Default: 0Type: integer

ForcedGas is the amount of gas to be forced in case of gas estimation error


Pool service configuration
Default: "5m0s"Type: string

IntervalToRefreshBlockedAddresses is the time it takes to sync the
blocked address list from db to memory


Examples:

"1m"
+
"300ms"
+

Default: "5s"Type: string

IntervalToRefreshGasPrices is the time to wait to refresh the gas prices


Examples:

"1m"
+
"300ms"
+

Default: 100132Type: integer

MaxTxBytesSize is the max size of a transaction in bytes


Default: 100000Type: integer

MaxTxDataBytesSize is the max size of the data field of a transaction in bytes


DB is the database configuration
Default: "pool_db"Type: string

Database name


Default: "pool_user"Type: string

Database User name


Default: "pool_password"Type: string

Database Password of the user


Default: "zkevm-pool-db"Type: string

Host address of database


Default: "5432"Type: string

Port Number of database


Default: falseType: boolean

EnableLog


Default: 200Type: integer

MaxConns is the maximum number of connections in the pool.


Default: 1000000000Type: integer

DefaultMinGasPriceAllowed is the default min gas price to suggest


Default: "5m0s"Type: string

MinAllowedGasPriceInterval is the interval to look back of the suggested min gas price for a tx


Examples:

"1m"
+
"300ms"
+

Default: "15s"Type: string

PollMinAllowedGasPriceInterval is the interval to poll the suggested min gas price for a tx


Examples:

"1m"
+
"300ms"
+

Default: 64Type: integer

AccountQueue represents the maximum number of non-executable transaction slots permitted per account


Default: 1024Type: integer

GlobalQueue represents the maximum number of non-executable transaction slots for all accounts


Configuration for RPC service. This one offers an extended Ethereum JSON-RPC API interface to interact with the node
Default: "0.0.0.0"Type: string

Host defines the network adapter that will be used to serve the HTTP requests


Default: 8545Type: integer

Port defines the port to serve the endpoints via HTTP


Default: "1m0s"Type: string

ReadTimeout is the HTTP server read timeout
check net/http.server.ReadTimeout and net/http.server.ReadHeaderTimeout


Examples:

"1m"
+
"300ms"
+

Default: "1m0s"Type: string

WriteTimeout is the HTTP server write timeout
check net/http.server.WriteTimeout


Examples:

"1m"
+
"300ms"
+

Default: 500Type: number

MaxRequestsPerIPAndSecond defines how much requests a single IP can
send within a single second


Default: ""Type: string

SequencerNodeURI is used to allow Non-Sequencer nodes
to relay transactions to the Sequencer node


Default: 0Type: integer

MaxCumulativeGasUsed is the max gas allowed per batch


WebSockets configuration
Default: trueType: boolean

Enabled defines if the WebSocket requests are enabled or disabled


Default: "0.0.0.0"Type: string

Host defines the network adapter that will be used to serve the WS requests


Default: 8546Type: integer

Port defines the port to serve the endpoints via WS


Default: trueType: boolean

EnableL2SuggestedGasPricePolling enables polling of the L2 gas price to block tx in the RPC with lower gas price.


Default: trueType: boolean

TraceBatchUseHTTPS enables, in the debugtraceBatchByNum endpoint, the use of the HTTPS protocol (instead of HTTP)
to do the parallel requests to RPC.debug
traceTransaction endpoint


Configuration of the `Synchronizer` service. For this service the value of `IsTrustedSequencer` is also really important, because depending on this value it is going to ask a trusted node for trusted transactions or not
Default: "1s"Type: string

SyncInterval is the delay interval between reading new rollup information


Examples:

"1m"
+
"300ms"
+

Default: 100Type: integer

SyncChunkSize is the number of blocks to sync on each chunk


Default: ""Type: string

TrustedSequencerURL is the rpc url to connect and sync the trusted state


Configuration of the sequencer service
Default: "1s"Type: string

WaitPeriodPoolIsEmpty is the time the sequencer waits until
trying to add new txs to the state


Examples:

"1m"
+
"300ms"
+

Default: 100Type: integer

BlocksAmountForTxsToBeDeleted is the number of blocks after which txs will be deleted from the pool


Default: "12h0m0s"Type: string

FrequencyToCheckTxsForDelete is the frequency with which txs will be checked for deletion


Examples:

"1m"
+
"300ms"
+

Default: 300Type: integer

MaxTxsPerBatch is the maximum amount of transactions in the batch


Default: 120000Type: integer

MaxBatchBytesSize is the maximum batch size in bytes
(subtracted bits of all types.Sequence fields excluding BatchL2Data from MaxTxSizeForL1)


Default: 30000000Type: integer

MaxCumulativeGasUsed is max gas amount used by batch


Default: 2145Type: integer

MaxKeccakHashes is max keccak hashes used by batch


Default: 252357Type: integer

MaxPoseidonHashes is max poseidon hashes batch can handle


Default: 135191Type: integer

MaxPoseidonPaddings is max poseidon paddings batch can handle


Default: 236585Type: integer

MaxMemAligns is max mem aligns batch can handle


Default: 236585Type: integer

MaxArithmetics is max arithmetics batch can handle


Default: 473170Type: integer

MaxBinaries is max binaries batch can handle


Default: 7570538Type: integer

MaxSteps is max steps batch can handle


Default: 1Type: integer

WeightBatchBytesSize is the cost weight for the BatchBytesSize batch resource


Default: 1Type: integer

WeightCumulativeGasUsed is the cost weight for the CumulativeGasUsed batch resource


Default: 1Type: integer

WeightKeccakHashes is the cost weight for the KeccakHashes batch resource


Default: 1Type: integer

WeightPoseidonHashes is the cost weight for the PoseidonHashes batch resource


Default: 1Type: integer

WeightPoseidonPaddings is the cost weight for the PoseidonPaddings batch resource


Default: 1Type: integer

WeightMemAligns is the cost weight for the MemAligns batch resource


Default: 1Type: integer

WeightArithmetics is the cost weight for the Arithmetics batch resource


Default: 1Type: integer

WeightBinaries is the cost weight for the Binaries batch resource


Default: 1Type: integer

WeightSteps is the cost weight for the Steps batch resource


Default: "10m0s"Type: string

TxLifetimeCheckTimeout is the time the sequencer waits to check txs lifetime


Examples:

"1m"
+
"300ms"
+

Default: "3h0m0s"Type: string

MaxTxLifetime is the time a tx can be in the sequencer memory


Examples:

"1m"
+
"300ms"
+

Finalizer's specific config properties
Default: "5s"Type: string

GERDeadlineTimeout is the time the finalizer waits after receiving closing signal to update Global Exit Root


Examples:

"1m"
+
"300ms"
+

Default: "1m0s"Type: string

ForcedBatchDeadlineTimeout is the time the finalizer waits after receiving closing signal to process Forced Batches


Examples:

"1m"
+
"300ms"
+

Default: "100ms"Type: string

SleepDuration is the time the finalizer sleeps between each iteration, if there are no transactions to be processed


Examples:

"1m"
+
"300ms"
+

Default: 10Type: integer

ResourcePercentageToCloseBatch is the percentage window of the resource left out for the batch to be closed


Default: 64Type: integer

GERFinalityNumberOfBlocks is number of blocks to consider GER final


Default: "10s"Type: string

ClosingSignalsManagerWaitForCheckingL1Timeout is used by the closing signals manager to wait for its operation


Examples:

"1m"
+
"300ms"
+

Default: "10s"Type: string

ClosingSignalsManagerWaitForCheckingGER is used by the closing signals manager to wait for its operation


Examples:

"1m"
+
"300ms"
+

Default: "10s"Type: string

ClosingSignalsManagerWaitForCheckingL1Timeout is used by the closing signals manager to wait for its operation


Examples:

"1m"
+
"300ms"
+

Default: 64Type: integer

ForcedBatchesFinalityNumberOfBlocks is the number of blocks to consider Forced Batches final


Default: "10s"Type: string

TimestampResolution is the resolution of the timestamp used to close a batch


Examples:

"1m"
+
"300ms"
+

Default: 0Type: integer

StopSequencerOnBatchNum specifies the batch number where the Sequencer will stop to process more transactions and generate new batches. The Sequencer will halt after it closes the batch equal to this number


DBManager's specific config properties
Default: "500ms"Type: string

Examples:

"1m"
+
"300ms"
+

Default: "5s"Type: string

Examples:

"1m"
+
"300ms"
+

Worker's specific config properties
Default: 1000Type: number

ResourceCostMultiplier is the multiplier for the resource cost


EffectiveGasPrice is the config for the gas price
Default: 10Type: integer

MaxBreakEvenGasPriceDeviationPercentage is the max allowed deviation percentage BreakEvenGasPrice on re-calculation


Default: 0.25Type: number

L1GasPriceFactor is the percentage of the L1 gas price that will be used as the L2 min gas price


Default: 16Type: integer

ByteGasCost is the gas cost per byte


Default: 1Type: number

MarginFactor is the margin factor percentage to be added to the L2 min gas price


Default: falseType: boolean

Enabled is a flag to enable/disable the effective gas price


Default: 0Type: integer

DefaultMinGasPriceAllowed is the default min gas price to suggest
This value is assigned from [Pool].DefaultMinGasPriceAllowed


Configuration of the sequence sender service
Default: "5s"Type: string

WaitPeriodSendSequence is the time the sequencer waits until
trying to send a sequence to L1


Examples:

"1m"
+
"300ms"
+

Default: "5s"Type: string

LastBatchVirtualizationTimeMaxWaitPeriod is time since sequences should be sent


Examples:

"1m"
+
"300ms"
+

Default: 131072Type: integer

MaxTxSizeForL1 is the maximum size a single transaction can have. This field has
non-trivial consequences: larger transactions than 128KB are significantly harder and
more expensive to propagate; larger transactions also take more resources
to validate whether they fit into the pool or not.


Default: "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266"Type: string

SenderAddress defines which private key the eth tx manager needs to use
to sign the L1 txs


Default: [{"Path": "/pk/sequencer.keystore", "Password": "testonly"}]Type: array of object

PrivateKeys defines all the key store files that are going
to be read in order to provide the private keys to sign the L1 txs

Each item of this array must be:

Type: string

Path is the file path for the key store file


Type: string

Password is the password to decrypt the key store file



Default: 0Type: integer

Batch number where there is a forkid change (fork upgrade)


Configuration of the aggregator service
Default: "0.0.0.0"Type: string

Host for the grpc server


Default: 50081Type: integer

Port for the grpc server


Default: "5s"Type: string

RetryTime is the time the aggregator main loop sleeps if there are no proofs to aggregate
or batches to generate proofs. It is also used in the isSynced loop


Examples:

"1m"
+
"300ms"
+

Default: "1m30s"Type: string

VerifyProofInterval is the interval of time to verify/send a proof in L1


Examples:

"1m"
+
"300ms"
+

Default: "5s"Type: string

ProofStatePollingInterval is the interval time to polling the prover about the generation state of a proof


Examples:

"1m"
+
"300ms"
+

Default: "acceptall"Type: string

TxProfitabilityCheckerType is the type of checker used to decide whether it is profitable for the aggregator to validate a batch.
Possible values: base/acceptall


TxProfitabilityMinReward is the minimum reward required for the aggregator to validate a batch; this parameter is used only by the base tx profitability checker
Default: "0s"Type: string

IntervalAfterWhichBatchConsolidateAnyway is the interval after which the main sequencer will consolidate a batch anyway, even if there are no transactions


Examples:

"1m"
+
"300ms"
+

Default: 0Type: integer

ChainID is the L2 ChainID provided by the Network Config


Default: 0Type: integer

ForkID is the L2 ForkID provided by the Network Config


Default: ""Type: string

SenderAddress defines which private key the eth tx manager needs to use
to sign the L1 txs


Default: "2m0s"Type: string

CleanupLockedProofsInterval is the interval of time to clean up locked proofs.


Examples:

"1m"
+
"300ms"
+

Default: "10m"Type: string

GeneratingProofCleanupThreshold represents the time interval after
which a proof in generating state is considered to be stuck and
allowed to be cleared.


Configuration of the genesis of the network. This is used to know the initial state of the network

L1: Configuration related to L1
Default: 0Type: integer

Chain ID of the L1 network


Type: array of integer

Address of the L1 contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

Address of the L1 Matic token Contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

Address of the L1 GlobalExitRootManager contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

DEPRECATED L2: address of the PolygonZkEVMGlobalExitRootL2 proxy smart contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


Type: array of integer

L2: address of the PolygonZkEVMBridge proxy smart contract

Must contain a minimum of 20 items

Must contain a maximum of 20 items

Each item of this array must be:


L1: Genesis of the rollup, first block number and root
Default: 0Type: integer

GenesisBlockNum is the block number where the polygonZKEVM smc was deployed on L1


Type: array of integer

Root hash of the genesis block

Must contain a minimum of 32 items

Must contain a maximum of 32 items

Each item of this array must be:


Type: array of object

Contracts to be deployed to L2

Each item of this array must be:


Configuration of the gas price suggester service
Default: "follower"Type: string

Default: 2000000000Type: integer

DefaultGasPriceWei is used to set the gas price to be used by the default gas pricer or as the minimum gas price by the follower gas pricer.


Default: 0Type: integer

MaxGasPriceWei is used to limit the gas price returned by the follower gas pricer to a maximum value. It is ignored if 0.


Default: 0Type: integer

Default: 0Type: integer

Default: "10s"Type: string

Examples:

"1m"
+
"300ms"
+

Default: "1h0m0s"Type: string

Examples:

"1m"
+
"300ms"
+

Default: "5m0s"Type: string

Examples:

"1m"
+
"300ms"
+

Default: 0.15Type: number

Configuration of the executor service
Default: "zkevm-prover:50071"Type: string

Default: 3Type: integer

MaxResourceExhaustedAttempts is the max number of attempts to make a transaction succeed because of resource exhaustion


Default: "1s"Type: string

WaitOnResourceExhaustion is the time to wait before retrying a transaction because of resource exhaustion


Examples:

"1m"
+
"300ms"
+

Default: 100000000Type: integer

Configuration of the merkle tree client service. Not used in the node, only for testing
Default: "zkevm-prover:50061"Type: string

URI is the server URI.


Configuration of the state database connection
Default: "state_db"Type: string

Database name


Default: "state_user"Type: string

Database User name


Default: "state_password"Type: string

Database Password of the user


Default: "zkevm-state-db"Type: string

Host address of database


Default: "5432"Type: string

Port Number of database


Default: falseType: boolean

EnableLog


Default: 200Type: integer

MaxConns is the maximum number of connections in the pool.


Configuration of the metrics service; basically, it defines where the metrics are going to be published
Default: "0.0.0.0"Type: string

Host is the address to bind the metrics server


Default: 9091Type: integer

Port is the port to bind the metrics server


Default: falseType: boolean

Enabled is the flag to enable/disable the metrics server


Default: ""Type: string

ProfilingHost is the address to bind the profiling server


Default: 0Type: integer

ProfilingPort is the port to bind the profiling server


Default: falseType: boolean

ProfilingEnabled is the flag to enable/disable the profiling server


Configuration of the event database connection

DB is the database configuration
Default: ""Type: string

Database name


Default: ""Type: string

Database User name


Default: ""Type: string

Database Password of the user


Default: ""Type: string

Host address of database


Default: ""Type: string

Port Number of database


Default: falseType: boolean

EnableLog


Default: 0Type: integer

MaxConns is the maximum number of connections in the pool.


Configuration of the hash database connection
Default: "prover_db"Type: string

Database name


Default: "prover_user"Type: string

Database User name


Default: "prover_pass"Type: string

Database Password of the user


Default: "zkevm-state-db"Type: string

Host address of database


Default: "5432"Type: string

Port Number of database


Default: falseType: boolean

EnableLog


Default: 200Type: integer

MaxConns is the maximum number of connections in the pool.


\ No newline at end of file diff --git a/docs/config-file/node-config-doc.md b/docs/config-file/node-config-doc.md new file mode 100644 index 0000000000..69c8f8bb43 --- /dev/null +++ b/docs/config-file/node-config-doc.md @@ -0,0 +1,3089 @@ +# Schema Docs + +**Type:** : `object` +**Description:** Config represents the configuration of the entire Hermez Node The file is TOML format You could find some examples: + +[TOML format]: https://en.wikipedia.org/wiki/TOML + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ---------------------------------------------------- | ------- | ------- | ---------- | ---------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| - [IsTrustedSequencer](#IsTrustedSequencer ) | No | boolean | No | - | This define is a trusted node (\`true\`) or a permission less (\`false\`). If you don't known
set to \`false\` | +| - [ForkUpgradeBatchNumber](#ForkUpgradeBatchNumber ) | No | integer | No | - | Last batch number before a forkid change (fork upgrade). That implies that
greater batch numbers are going to be trusted but no virtualized neither verified.
So after the batch number \`ForkUpgradeBatchNumber\` is virtualized and verified you could update
the system (SC,...) to new forkId and remove this value to allow the system to keep
Virtualizing and verifying the new batchs.
Check issue [#2236](https://github.com/0xPolygonHermez/zkevm-node/issues/2236) to known more
This value overwrite \`SequenceSender.ForkUpgradeBatchNumber\` | +| - [ForkUpgradeNewForkId](#ForkUpgradeNewForkId ) | No | integer | No | - | Which is the new forkId | +| - [Log](#Log ) | No | object | No | - | Configure Log level for all the services, allow also to store the logs in a file | +| - [Etherman](#Etherman ) | No | object | No | - | Configuration of the etherman (client for access L1) | +| - [EthTxManager](#EthTxManager ) | No | object | No | - | Configuration for ethereum transaction manager | +| - [Pool](#Pool ) | No | object | No | - | Pool service configuration | +| - [RPC](#RPC ) | No | object | No | - | Configuration for RPC service. THis one offers a extended Ethereum JSON-RPC API interface to interact with the node | +| - [Synchronizer](#Synchronizer ) | No | object | No | - | Configuration of service \`Syncrhonizer\`. For this service is also really important the value of \`IsTrustedSequencer\`
because depending of this values is going to ask to a trusted node for trusted transactions or not | +| - [Sequencer](#Sequencer ) | No | object | No | - | Configuration of the sequencer service | +| - [SequenceSender](#SequenceSender ) | No | object | No | - | Configuration of the sequence sender service | +| - [Aggregator](#Aggregator ) | No | object | No | - | Configuration of the aggregator service | +| - [NetworkConfig](#NetworkConfig ) | No | object | No | - | Configuration of the genesis of the network. This is used to known the initial state of the network | +| - [L2GasPriceSuggester](#L2GasPriceSuggester ) | No | object | No | - | Configuration of the gas price suggester service | +| - [Executor](#Executor ) | No | object | No | - | Configuration of the executor service | +| - [MTClient](#MTClient ) | No | object | No | - | Configuration of the merkle tree client service. Not use in the node, only for testing | +| - [StateDB](#StateDB ) | No | object | No | - | Configuration of the state database connection | +| - [Metrics](#Metrics ) | No | object | No | - | Configuration of the metrics service, basically is where is going to publish the metrics | +| - [EventLog](#EventLog ) | No | object | No | - | Configuration of the event database connection | +| - [HashDB](#HashDB ) | No | object | No | - | Configuration of the hash database connection | + +## 1. `IsTrustedSequencer` + +**Type:** : `boolean` + +**Default:** `false` + +**Description:** This define is a trusted node (`true`) or a permission less (`false`). If you don't known +set to `false` + +**Example setting the default value** (false): +``` +IsTrustedSequencer=false +``` + +## 2. `ForkUpgradeBatchNumber` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** Last batch number before a forkid change (fork upgrade). That implies that +greater batch numbers are going to be trusted but no virtualized neither verified. 
+So after the batch number `ForkUpgradeBatchNumber` is virtualized and verified you could update +the system (SC,...) to new forkId and remove this value to allow the system to keep +Virtualizing and verifying the new batchs. +Check issue [#2236](https://github.com/0xPolygonHermez/zkevm-node/issues/2236) to known more +This value overwrite `SequenceSender.ForkUpgradeBatchNumber` + +**Example setting the default value** (0): +``` +ForkUpgradeBatchNumber=0 +``` + +## 3. `ForkUpgradeNewForkId` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** Which is the new forkId + +**Example setting the default value** (0): +``` +ForkUpgradeNewForkId=0 +``` + +## 4. `[Log]` + +**Type:** : `object` +**Description:** Configure Log level for all the services, allow also to store the logs in a file + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ---------------------------------- | ------- | ---------------- | ---------- | ---------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| - [Environment](#Log_Environment ) | No | enum (of string) | No | - | Environment defining the log format ("production" or "development").
In development mode enables development mode (which makes DPanicLevel logs panic), uses a console encoder, writes to standard error, and disables sampling. Stacktraces are automatically included on logs of WarnLevel and above.
Check [here](https://pkg.go.dev/go.uber.org/zap@v1.24.0#NewDevelopmentConfig) | +| - [Level](#Log_Level ) | No | enum (of string) | No | - | Level of log. As lower value more logs are going to be generated | +| - [Outputs](#Log_Outputs ) | No | array of string | No | - | Outputs | + +### 4.1. `Log.Environment` + +**Type:** : `enum (of string)` + +**Default:** `"development"` + +**Description:** Environment defining the log format ("production" or "development"). +In development mode enables development mode (which makes DPanicLevel logs panic), uses a console encoder, writes to standard error, and disables sampling. Stacktraces are automatically included on logs of WarnLevel and above. +Check [here](https://pkg.go.dev/go.uber.org/zap@v1.24.0#NewDevelopmentConfig) + +**Example setting the default value** ("development"): +``` +[Log] +Environment="development" +``` + +Must be one of: +* "production" +* "development" + +### 4.2. `Log.Level` + +**Type:** : `enum (of string)` + +**Default:** `"info"` + +**Description:** Level of log. As lower value more logs are going to be generated + +**Example setting the default value** ("info"): +``` +[Log] +Level="info" +``` + +Must be one of: +* "debug" +* "info" +* "warn" +* "error" +* "dpanic" +* "panic" +* "fatal" + +### 4.3. `Log.Outputs` + +**Type:** : `array of string` + +**Default:** `["stderr"]` + +**Description:** Outputs + +**Example setting the default value** (["stderr"]): +``` +[Log] +Outputs=["stderr"] +``` + +## 5. 
`[Etherman]` + +**Type:** : `object` +**Description:** Configuration of the etherman (client for access L1) + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------------- | ------- | ------- | ---------- | ---------- | --------------------------------------------------------------------------------------- | +| - [URL](#Etherman_URL ) | No | string | No | - | URL is the URL of the Ethereum node for L1 | +| - [MultiGasProvider](#Etherman_MultiGasProvider ) | No | boolean | No | - | allow that L1 gas price calculation use multiples sources | +| - [Etherscan](#Etherman_Etherscan ) | No | object | No | - | Configuration for use Etherscan as used as gas provider, basically it needs the API-KEY | + +### 5.1. `Etherman.URL` + +**Type:** : `string` + +**Default:** `"http://localhost:8545"` + +**Description:** URL is the URL of the Ethereum node for L1 + +**Example setting the default value** ("http://localhost:8545"): +``` +[Etherman] +URL="http://localhost:8545" +``` + +### 5.2. `Etherman.MultiGasProvider` + +**Type:** : `boolean` + +**Default:** `false` + +**Description:** allow that L1 gas price calculation use multiples sources + +**Example setting the default value** (false): +``` +[Etherman] +MultiGasProvider=false +``` + +### 5.3. 
`[Etherman.Etherscan]` + +**Type:** : `object` +**Description:** Configuration for use Etherscan as used as gas provider, basically it needs the API-KEY + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| --------------------------------------- | ------- | ------ | ---------- | ---------- | ------------------------------------------------------------------------------------------------------------------------------------- | +| - [ApiKey](#Etherman_Etherscan_ApiKey ) | No | string | No | - | Need API key to use etherscan, if it's empty etherscan is not used | +| - [Url](#Etherman_Etherscan_Url ) | No | string | No | - | URL of the etherscan API. Overwritten with a hardcoded URL: "https://api.etherscan.io/api?module=gastracker&action=gasoracle&apikey=" | + +#### 5.3.1. `Etherman.Etherscan.ApiKey` + +**Type:** : `string` + +**Default:** `""` + +**Description:** Need API key to use etherscan, if it's empty etherscan is not used + +**Example setting the default value** (""): +``` +[Etherman.Etherscan] +ApiKey="" +``` + +#### 5.3.2. `Etherman.Etherscan.Url` + +**Type:** : `string` + +**Default:** `""` + +**Description:** URL of the etherscan API. Overwritten with a hardcoded URL: "https://api.etherscan.io/api?module=gastracker&action=gasoracle&apikey=" + +**Example setting the default value** (""): +``` +[Etherman.Etherscan] +Url="" +``` + +## 6. 
`[EthTxManager]` + +**Type:** : `object` +**Description:** Configuration for ethereum transaction manager + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| --------------------------------------------------------------- | ------- | --------------- | ---------- | ---------- | ---------------------------------------------------------------------------------------------------------------------------------- | +| - [FrequencyToMonitorTxs](#EthTxManager_FrequencyToMonitorTxs ) | No | string | No | - | Duration | +| - [WaitTxToBeMined](#EthTxManager_WaitTxToBeMined ) | No | string | No | - | Duration | +| - [PrivateKeys](#EthTxManager_PrivateKeys ) | No | array of object | No | - | PrivateKeys defines all the key store files that are going
to be read in order to provide the private keys to sign the L1 txs | +| - [ForcedGas](#EthTxManager_ForcedGas ) | No | integer | No | - | ForcedGas is the amount of gas to be forced in case of gas estimation error | + +### 6.1. `EthTxManager.FrequencyToMonitorTxs` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"1s"` + +**Description:** FrequencyToMonitorTxs frequency of the resending failed txs + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("1s"): +``` +[EthTxManager] +FrequencyToMonitorTxs="1s" +``` + +### 6.2. `EthTxManager.WaitTxToBeMined` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"2m0s"` + +**Description:** WaitTxToBeMined time to wait after transaction was sent to the ethereum + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("2m0s"): +``` +[EthTxManager] +WaitTxToBeMined="2m0s" +``` + +### 6.3. `EthTxManager.PrivateKeys` + +**Type:** : `array of object` +**Description:** PrivateKeys defines all the key store files that are going +to be read in order to provide the private keys to sign the L1 txs + +| | Array restrictions | +| -------------------- | ------------------ | +| **Min items** | N/A | +| **Max items** | N/A | +| **Items unicity** | False | +| **Additional items** | False | +| **Tuple validation** | See below | + +| Each item of this array must be | Description | +| ---------------------------------------------------- | ------------------------------------------------------------------------------------ | +| [PrivateKeys items](#EthTxManager_PrivateKeys_items) | KeystoreFileConfig has all the information needed to load a private key from a k ... | + +#### 6.3.1. 
[EthTxManager.PrivateKeys.PrivateKeys items] + +**Type:** : `object` +**Description:** KeystoreFileConfig has all the information needed to load a private key from a key store file + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------------------- | ------- | ------ | ---------- | ---------- | ------------------------------------------------------ | +| - [Path](#EthTxManager_PrivateKeys_items_Path ) | No | string | No | - | Path is the file path for the key store file | +| - [Password](#EthTxManager_PrivateKeys_items_Password ) | No | string | No | - | Password is the password to decrypt the key store file | + +##### 6.3.1.1. `EthTxManager.PrivateKeys.PrivateKeys items.Path` + +**Type:** : `string` +**Description:** Path is the file path for the key store file + +##### 6.3.1.2. `EthTxManager.PrivateKeys.PrivateKeys items.Password` + +**Type:** : `string` +**Description:** Password is the password to decrypt the key store file + +### 6.4. `EthTxManager.ForcedGas` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** ForcedGas is the amount of gas to be forced in case of gas estimation error + +**Example setting the default value** (0): +``` +[EthTxManager] +ForcedGas=0 +``` + +## 7. 
`[Pool]` + +**Type:** : `object` +**Description:** Pool service configuration + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------------------------------------------- | ------- | ------- | ---------- | ---------- | ---------------------------------------------------------------------------------------------------- | +| - [IntervalToRefreshBlockedAddresses](#Pool_IntervalToRefreshBlockedAddresses ) | No | string | No | - | Duration | +| - [IntervalToRefreshGasPrices](#Pool_IntervalToRefreshGasPrices ) | No | string | No | - | Duration | +| - [MaxTxBytesSize](#Pool_MaxTxBytesSize ) | No | integer | No | - | MaxTxBytesSize is the max size of a transaction in bytes | +| - [MaxTxDataBytesSize](#Pool_MaxTxDataBytesSize ) | No | integer | No | - | MaxTxDataBytesSize is the max size of the data field of a transaction in bytes | +| - [DB](#Pool_DB ) | No | object | No | - | DB is the database configuration | +| - [DefaultMinGasPriceAllowed](#Pool_DefaultMinGasPriceAllowed ) | No | integer | No | - | DefaultMinGasPriceAllowed is the default min gas price to suggest | +| - [MinAllowedGasPriceInterval](#Pool_MinAllowedGasPriceInterval ) | No | string | No | - | Duration | +| - [PollMinAllowedGasPriceInterval](#Pool_PollMinAllowedGasPriceInterval ) | No | string | No | - | Duration | +| - [AccountQueue](#Pool_AccountQueue ) | No | integer | No | - | AccountQueue represents the maximum number of non-executable transaction slots permitted per account | +| - [GlobalQueue](#Pool_GlobalQueue ) | No | integer | No | - | GlobalQueue represents the maximum number of non-executable transaction slots for all accounts | + +### 7.1. 
`Pool.IntervalToRefreshBlockedAddresses` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"5m0s"` + +**Description:** IntervalToRefreshBlockedAddresses is the time it takes to sync the +blocked address list from db to memory + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("5m0s"): +``` +[Pool] +IntervalToRefreshBlockedAddresses="5m0s" +``` + +### 7.2. `Pool.IntervalToRefreshGasPrices` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"5s"` + +**Description:** IntervalToRefreshGasPrices is the time to wait to refresh the gas prices + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("5s"): +``` +[Pool] +IntervalToRefreshGasPrices="5s" +``` + +### 7.3. `Pool.MaxTxBytesSize` + +**Type:** : `integer` + +**Default:** `100132` + +**Description:** MaxTxBytesSize is the max size of a transaction in bytes + +**Example setting the default value** (100132): +``` +[Pool] +MaxTxBytesSize=100132 +``` + +### 7.4. `Pool.MaxTxDataBytesSize` + +**Type:** : `integer` + +**Default:** `100000` + +**Description:** MaxTxDataBytesSize is the max size of the data field of a transaction in bytes + +**Example setting the default value** (100000): +``` +[Pool] +MaxTxDataBytesSize=100000 +``` + +### 7.5. 
`[Pool.DB]` + +**Type:** : `object` +**Description:** DB is the database configuration + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ---------------------------------- | ------- | ------- | ---------- | ---------- | ---------------------------------------------------------- | +| - [Name](#Pool_DB_Name ) | No | string | No | - | Database name | +| - [User](#Pool_DB_User ) | No | string | No | - | Database User name | +| - [Password](#Pool_DB_Password ) | No | string | No | - | Database Password of the user | +| - [Host](#Pool_DB_Host ) | No | string | No | - | Host address of database | +| - [Port](#Pool_DB_Port ) | No | string | No | - | Port Number of database | +| - [EnableLog](#Pool_DB_EnableLog ) | No | boolean | No | - | EnableLog | +| - [MaxConns](#Pool_DB_MaxConns ) | No | integer | No | - | MaxConns is the maximum number of connections in the pool. | + +#### 7.5.1. `Pool.DB.Name` + +**Type:** : `string` + +**Default:** `"pool_db"` + +**Description:** Database name + +**Example setting the default value** ("pool_db"): +``` +[Pool.DB] +Name="pool_db" +``` + +#### 7.5.2. `Pool.DB.User` + +**Type:** : `string` + +**Default:** `"pool_user"` + +**Description:** Database User name + +**Example setting the default value** ("pool_user"): +``` +[Pool.DB] +User="pool_user" +``` + +#### 7.5.3. `Pool.DB.Password` + +**Type:** : `string` + +**Default:** `"pool_password"` + +**Description:** Database Password of the user + +**Example setting the default value** ("pool_password"): +``` +[Pool.DB] +Password="pool_password" +``` + +#### 7.5.4. `Pool.DB.Host` + +**Type:** : `string` + +**Default:** `"zkevm-pool-db"` + +**Description:** Host address of database + +**Example setting the default value** ("zkevm-pool-db"): +``` +[Pool.DB] +Host="zkevm-pool-db" +``` + +#### 7.5.5. 
`Pool.DB.Port` + +**Type:** : `string` + +**Default:** `"5432"` + +**Description:** Port Number of database + +**Example setting the default value** ("5432"): +``` +[Pool.DB] +Port="5432" +``` + +#### 7.5.6. `Pool.DB.EnableLog` + +**Type:** : `boolean` + +**Default:** `false` + +**Description:** EnableLog + +**Example setting the default value** (false): +``` +[Pool.DB] +EnableLog=false +``` + +#### 7.5.7. `Pool.DB.MaxConns` + +**Type:** : `integer` + +**Default:** `200` + +**Description:** MaxConns is the maximum number of connections in the pool. + +**Example setting the default value** (200): +``` +[Pool.DB] +MaxConns=200 +``` + +### 7.6. `Pool.DefaultMinGasPriceAllowed` + +**Type:** : `integer` + +**Default:** `1000000000` + +**Description:** DefaultMinGasPriceAllowed is the default min gas price to suggest + +**Example setting the default value** (1000000000): +``` +[Pool] +DefaultMinGasPriceAllowed=1000000000 +``` + +### 7.7. `Pool.MinAllowedGasPriceInterval` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"5m0s"` + +**Description:** MinAllowedGasPriceInterval is the interval to look back of the suggested min gas price for a tx + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("5m0s"): +``` +[Pool] +MinAllowedGasPriceInterval="5m0s" +``` + +### 7.8. `Pool.PollMinAllowedGasPriceInterval` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"15s"` + +**Description:** PollMinAllowedGasPriceInterval is the interval to poll the suggested min gas price for a tx + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("15s"): +``` +[Pool] +PollMinAllowedGasPriceInterval="15s" +``` + +### 7.9. 
`Pool.AccountQueue` + +**Type:** : `integer` + +**Default:** `64` + +**Description:** AccountQueue represents the maximum number of non-executable transaction slots permitted per account + +**Example setting the default value** (64): +``` +[Pool] +AccountQueue=64 +``` + +### 7.10. `Pool.GlobalQueue` + +**Type:** : `integer` + +**Default:** `1024` + +**Description:** GlobalQueue represents the maximum number of non-executable transaction slots for all accounts + +**Example setting the default value** (1024): +``` +[Pool] +GlobalQueue=1024 +``` + +## 8. `[RPC]` + +**Type:** : `object` +**Description:** Configuration for RPC service. This one offers an extended Ethereum JSON-RPC API interface to interact with the node + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ---------------------------------------------------------------------------- | ------- | ------- | ---------- | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| - [Host](#RPC_Host ) | No | string | No | - | Host defines the network adapter that will be used to serve the HTTP requests | +| - [Port](#RPC_Port ) | No | integer | No | - | Port defines the port to serve the endpoints via HTTP | +| - [ReadTimeout](#RPC_ReadTimeout ) | No | string | No | - | Duration | +| - [WriteTimeout](#RPC_WriteTimeout ) | No | string | No | - | Duration | +| - [MaxRequestsPerIPAndSecond](#RPC_MaxRequestsPerIPAndSecond ) | No | number | No | - | MaxRequestsPerIPAndSecond defines how many requests a single IP can
send within a single second | +| - [SequencerNodeURI](#RPC_SequencerNodeURI ) | No | string | No | - | SequencerNodeURI is used to allow Non-Sequencer nodes
to relay transactions to the Sequencer node | +| - [MaxCumulativeGasUsed](#RPC_MaxCumulativeGasUsed ) | No | integer | No | - | MaxCumulativeGasUsed is the max gas allowed per batch | +| - [WebSockets](#RPC_WebSockets ) | No | object | No | - | WebSockets configuration | +| - [EnableL2SuggestedGasPricePolling](#RPC_EnableL2SuggestedGasPricePolling ) | No | boolean | No | - | EnableL2SuggestedGasPricePolling enables polling of the L2 gas price to block tx in the RPC with lower gas price. | +| - [TraceBatchUseHTTPS](#RPC_TraceBatchUseHTTPS ) | No | boolean | No | - | TraceBatchUseHTTPS enables, in the debug_traceBatchByNum endpoint, the use of the HTTPS protocol (instead of HTTP)
to do the parallel requests to RPC.debug_traceTransaction endpoint | + +### 8.1. `RPC.Host` + +**Type:** : `string` + +**Default:** `"0.0.0.0"` + +**Description:** Host defines the network adapter that will be used to serve the HTTP requests + +**Example setting the default value** ("0.0.0.0"): +``` +[RPC] +Host="0.0.0.0" +``` + +### 8.2. `RPC.Port` + +**Type:** : `integer` + +**Default:** `8545` + +**Description:** Port defines the port to serve the endpoints via HTTP + +**Example setting the default value** (8545): +``` +[RPC] +Port=8545 +``` + +### 8.3. `RPC.ReadTimeout` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"1m0s"` + +**Description:** ReadTimeout is the HTTP server read timeout +check net/http.server.ReadTimeout and net/http.server.ReadHeaderTimeout + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("1m0s"): +``` +[RPC] +ReadTimeout="1m0s" +``` + +### 8.4. `RPC.WriteTimeout` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"1m0s"` + +**Description:** WriteTimeout is the HTTP server write timeout +check net/http.server.WriteTimeout + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("1m0s"): +``` +[RPC] +WriteTimeout="1m0s" +``` + +### 8.5. `RPC.MaxRequestsPerIPAndSecond` + +**Type:** : `number` + +**Default:** `500` + +**Description:** MaxRequestsPerIPAndSecond defines how many requests a single IP can +send within a single second + +**Example setting the default value** (500): +``` +[RPC] +MaxRequestsPerIPAndSecond=500 +``` + +### 8.6. `RPC.SequencerNodeURI` + +**Type:** : `string` + +**Default:** `""` + +**Description:** SequencerNodeURI is used to allow Non-Sequencer nodes +to relay transactions to the Sequencer node + +**Example setting the default value** (""): +``` +[RPC] +SequencerNodeURI="" +``` + +### 8.7. 
`RPC.MaxCumulativeGasUsed` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** MaxCumulativeGasUsed is the max gas allowed per batch + +**Example setting the default value** (0): +``` +[RPC] +MaxCumulativeGasUsed=0 +``` + +### 8.8. `[RPC.WebSockets]` + +**Type:** : `object` +**Description:** WebSockets configuration + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------- | ------- | ------- | ---------- | ---------- | --------------------------------------------------------------------------- | +| - [Enabled](#RPC_WebSockets_Enabled ) | No | boolean | No | - | Enabled defines if the WebSocket requests are enabled or disabled | +| - [Host](#RPC_WebSockets_Host ) | No | string | No | - | Host defines the network adapter that will be used to serve the WS requests | +| - [Port](#RPC_WebSockets_Port ) | No | integer | No | - | Port defines the port to serve the endpoints via WS | + +#### 8.8.1. `RPC.WebSockets.Enabled` + +**Type:** : `boolean` + +**Default:** `true` + +**Description:** Enabled defines if the WebSocket requests are enabled or disabled + +**Example setting the default value** (true): +``` +[RPC.WebSockets] +Enabled=true +``` + +#### 8.8.2. `RPC.WebSockets.Host` + +**Type:** : `string` + +**Default:** `"0.0.0.0"` + +**Description:** Host defines the network adapter that will be used to serve the WS requests + +**Example setting the default value** ("0.0.0.0"): +``` +[RPC.WebSockets] +Host="0.0.0.0" +``` + +#### 8.8.3. `RPC.WebSockets.Port` + +**Type:** : `integer` + +**Default:** `8546` + +**Description:** Port defines the port to serve the endpoints via WS + +**Example setting the default value** (8546): +``` +[RPC.WebSockets] +Port=8546 +``` + +### 8.9. `RPC.EnableL2SuggestedGasPricePolling` + +**Type:** : `boolean` + +**Default:** `true` + +**Description:** EnableL2SuggestedGasPricePolling enables polling of the L2 gas price to block tx in the RPC with lower gas price. 
+ +**Example setting the default value** (true): +``` +[RPC] +EnableL2SuggestedGasPricePolling=true +``` + +### 8.10. `RPC.TraceBatchUseHTTPS` + +**Type:** : `boolean` + +**Default:** `true` + +**Description:** TraceBatchUseHTTPS enables, in the debug_traceBatchByNum endpoint, the use of the HTTPS protocol (instead of HTTP) +to do the parallel requests to RPC.debug_traceTransaction endpoint + +**Example setting the default value** (true): +``` +[RPC] +TraceBatchUseHTTPS=true +``` + +## 9. `[Synchronizer]` + +**Type:** : `object` +**Description:** Configuration of service `Synchronizer`. For this service the value of `IsTrustedSequencer` is also really important, +because depending on this value it is going to ask a trusted node for trusted transactions or not + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ----------------------------------------------------------- | ------- | ------- | ---------- | ---------- | ------------------------------------------------------------------------ | +| - [SyncInterval](#Synchronizer_SyncInterval ) | No | string | No | - | Duration | +| - [SyncChunkSize](#Synchronizer_SyncChunkSize ) | No | integer | No | - | SyncChunkSize is the number of blocks to sync on each chunk | +| - [TrustedSequencerURL](#Synchronizer_TrustedSequencerURL ) | No | string | No | - | TrustedSequencerURL is the rpc url to connect and sync the trusted state | + +### 9.1. `Synchronizer.SyncInterval` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"1s"` + +**Description:** SyncInterval is the delay interval between reading new rollup information + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("1s"): +``` +[Synchronizer] +SyncInterval="1s" +``` + +### 9.2. 
`Synchronizer.SyncChunkSize` + +**Type:** : `integer` + +**Default:** `100` + +**Description:** SyncChunkSize is the number of blocks to sync on each chunk + +**Example setting the default value** (100): +``` +[Synchronizer] +SyncChunkSize=100 +``` + +### 9.3. `Synchronizer.TrustedSequencerURL` + +**Type:** : `string` + +**Default:** `""` + +**Description:** TrustedSequencerURL is the rpc url to connect and sync the trusted state + +**Example setting the default value** (""): +``` +[Synchronizer] +TrustedSequencerURL="" +``` + +## 10. `[Sequencer]` + +**Type:** : `object` +**Description:** Configuration of the sequencer service + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ---------------------------------------------------------------------------- | ------- | ------- | ---------- | ---------- | -------------------------------------------------------------------------------------------------------------------------------------------------- | +| - [WaitPeriodPoolIsEmpty](#Sequencer_WaitPeriodPoolIsEmpty ) | No | string | No | - | Duration | +| - [BlocksAmountForTxsToBeDeleted](#Sequencer_BlocksAmountForTxsToBeDeleted ) | No | integer | No | - | BlocksAmountForTxsToBeDeleted is blocks amount after which txs will be deleted from the pool | +| - [FrequencyToCheckTxsForDelete](#Sequencer_FrequencyToCheckTxsForDelete ) | No | string | No | - | Duration | +| - [MaxTxsPerBatch](#Sequencer_MaxTxsPerBatch ) | No | integer | No | - | MaxTxsPerBatch is the maximum amount of transactions in the batch | +| - [MaxBatchBytesSize](#Sequencer_MaxBatchBytesSize ) | No | integer | No | - | MaxBatchBytesSize is the maximum batch size in bytes
(subtracted bits of all types.Sequence fields excluding BatchL2Data from MaxTxSizeForL1) | +| - [MaxCumulativeGasUsed](#Sequencer_MaxCumulativeGasUsed ) | No | integer | No | - | MaxCumulativeGasUsed is max gas amount used by batch | +| - [MaxKeccakHashes](#Sequencer_MaxKeccakHashes ) | No | integer | No | - | MaxKeccakHashes is max keccak hashes used by batch | +| - [MaxPoseidonHashes](#Sequencer_MaxPoseidonHashes ) | No | integer | No | - | MaxPoseidonHashes is max poseidon hashes batch can handle | +| - [MaxPoseidonPaddings](#Sequencer_MaxPoseidonPaddings ) | No | integer | No | - | MaxPoseidonPaddings is max poseidon paddings batch can handle | +| - [MaxMemAligns](#Sequencer_MaxMemAligns ) | No | integer | No | - | MaxMemAligns is max mem aligns batch can handle | +| - [MaxArithmetics](#Sequencer_MaxArithmetics ) | No | integer | No | - | MaxArithmetics is max arithmetics batch can handle | +| - [MaxBinaries](#Sequencer_MaxBinaries ) | No | integer | No | - | MaxBinaries is max binaries batch can handle | +| - [MaxSteps](#Sequencer_MaxSteps ) | No | integer | No | - | MaxSteps is max steps batch can handle | +| - [WeightBatchBytesSize](#Sequencer_WeightBatchBytesSize ) | No | integer | No | - | WeightBatchBytesSize is the cost weight for the BatchBytesSize batch resource | +| - [WeightCumulativeGasUsed](#Sequencer_WeightCumulativeGasUsed ) | No | integer | No | - | WeightCumulativeGasUsed is the cost weight for the CumulativeGasUsed batch resource | +| - [WeightKeccakHashes](#Sequencer_WeightKeccakHashes ) | No | integer | No | - | WeightKeccakHashes is the cost weight for the KeccakHashes batch resource | +| - [WeightPoseidonHashes](#Sequencer_WeightPoseidonHashes ) | No | integer | No | - | WeightPoseidonHashes is the cost weight for the PoseidonHashes batch resource | +| - [WeightPoseidonPaddings](#Sequencer_WeightPoseidonPaddings ) | No | integer | No | - | WeightPoseidonPaddings is the cost weight for the PoseidonPaddings batch resource | +| - 
[WeightMemAligns](#Sequencer_WeightMemAligns ) | No | integer | No | - | WeightMemAligns is the cost weight for the MemAligns batch resource | +| - [WeightArithmetics](#Sequencer_WeightArithmetics ) | No | integer | No | - | WeightArithmetics is the cost weight for the Arithmetics batch resource | +| - [WeightBinaries](#Sequencer_WeightBinaries ) | No | integer | No | - | WeightBinaries is the cost weight for the Binaries batch resource | +| - [WeightSteps](#Sequencer_WeightSteps ) | No | integer | No | - | WeightSteps is the cost weight for the Steps batch resource | +| - [TxLifetimeCheckTimeout](#Sequencer_TxLifetimeCheckTimeout ) | No | string | No | - | Duration | +| - [MaxTxLifetime](#Sequencer_MaxTxLifetime ) | No | string | No | - | Duration | +| - [Finalizer](#Sequencer_Finalizer ) | No | object | No | - | Finalizer's specific config properties | +| - [DBManager](#Sequencer_DBManager ) | No | object | No | - | DBManager's specific config properties | +| - [Worker](#Sequencer_Worker ) | No | object | No | - | Worker's specific config properties | +| - [EffectiveGasPrice](#Sequencer_EffectiveGasPrice ) | No | object | No | - | EffectiveGasPrice is the config for the gas price | + +### 10.1. `Sequencer.WaitPeriodPoolIsEmpty` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"1s"` + +**Description:** WaitPeriodPoolIsEmpty is the time the sequencer waits until +trying to add new txs to the state + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("1s"): +``` +[Sequencer] +WaitPeriodPoolIsEmpty="1s" +``` + +### 10.2. `Sequencer.BlocksAmountForTxsToBeDeleted` + +**Type:** : `integer` + +**Default:** `100` + +**Description:** BlocksAmountForTxsToBeDeleted is blocks amount after which txs will be deleted from the pool + +**Example setting the default value** (100): +``` +[Sequencer] +BlocksAmountForTxsToBeDeleted=100 +``` + +### 10.3. 
`Sequencer.FrequencyToCheckTxsForDelete` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"12h0m0s"` + +**Description:** FrequencyToCheckTxsForDelete is frequency with which txs will be checked for deleting + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("12h0m0s"): +``` +[Sequencer] +FrequencyToCheckTxsForDelete="12h0m0s" +``` + +### 10.4. `Sequencer.MaxTxsPerBatch` + +**Type:** : `integer` + +**Default:** `300` + +**Description:** MaxTxsPerBatch is the maximum amount of transactions in the batch + +**Example setting the default value** (300): +``` +[Sequencer] +MaxTxsPerBatch=300 +``` + +### 10.5. `Sequencer.MaxBatchBytesSize` + +**Type:** : `integer` + +**Default:** `120000` + +**Description:** MaxBatchBytesSize is the maximum batch size in bytes +(subtracted bits of all types.Sequence fields excluding BatchL2Data from MaxTxSizeForL1) + +**Example setting the default value** (120000): +``` +[Sequencer] +MaxBatchBytesSize=120000 +``` + +### 10.6. `Sequencer.MaxCumulativeGasUsed` + +**Type:** : `integer` + +**Default:** `30000000` + +**Description:** MaxCumulativeGasUsed is max gas amount used by batch + +**Example setting the default value** (30000000): +``` +[Sequencer] +MaxCumulativeGasUsed=30000000 +``` + +### 10.7. `Sequencer.MaxKeccakHashes` + +**Type:** : `integer` + +**Default:** `2145` + +**Description:** MaxKeccakHashes is max keccak hashes used by batch + +**Example setting the default value** (2145): +``` +[Sequencer] +MaxKeccakHashes=2145 +``` + +### 10.8. `Sequencer.MaxPoseidonHashes` + +**Type:** : `integer` + +**Default:** `252357` + +**Description:** MaxPoseidonHashes is max poseidon hashes batch can handle + +**Example setting the default value** (252357): +``` +[Sequencer] +MaxPoseidonHashes=252357 +``` + +### 10.9. 
`Sequencer.MaxPoseidonPaddings` + +**Type:** : `integer` + +**Default:** `135191` + +**Description:** MaxPoseidonPaddings is max poseidon paddings batch can handle + +**Example setting the default value** (135191): +``` +[Sequencer] +MaxPoseidonPaddings=135191 +``` + +### 10.10. `Sequencer.MaxMemAligns` + +**Type:** : `integer` + +**Default:** `236585` + +**Description:** MaxMemAligns is max mem aligns batch can handle + +**Example setting the default value** (236585): +``` +[Sequencer] +MaxMemAligns=236585 +``` + +### 10.11. `Sequencer.MaxArithmetics` + +**Type:** : `integer` + +**Default:** `236585` + +**Description:** MaxArithmetics is max arithmetics batch can handle + +**Example setting the default value** (236585): +``` +[Sequencer] +MaxArithmetics=236585 +``` + +### 10.12. `Sequencer.MaxBinaries` + +**Type:** : `integer` + +**Default:** `473170` + +**Description:** MaxBinaries is max binaries batch can handle + +**Example setting the default value** (473170): +``` +[Sequencer] +MaxBinaries=473170 +``` + +### 10.13. `Sequencer.MaxSteps` + +**Type:** : `integer` + +**Default:** `7570538` + +**Description:** MaxSteps is max steps batch can handle + +**Example setting the default value** (7570538): +``` +[Sequencer] +MaxSteps=7570538 +``` + +### 10.14. `Sequencer.WeightBatchBytesSize` + +**Type:** : `integer` + +**Default:** `1` + +**Description:** WeightBatchBytesSize is the cost weight for the BatchBytesSize batch resource + +**Example setting the default value** (1): +``` +[Sequencer] +WeightBatchBytesSize=1 +``` + +### 10.15. `Sequencer.WeightCumulativeGasUsed` + +**Type:** : `integer` + +**Default:** `1` + +**Description:** WeightCumulativeGasUsed is the cost weight for the CumulativeGasUsed batch resource + +**Example setting the default value** (1): +``` +[Sequencer] +WeightCumulativeGasUsed=1 +``` + +### 10.16. 
`Sequencer.WeightKeccakHashes` + +**Type:** : `integer` + +**Default:** `1` + +**Description:** WeightKeccakHashes is the cost weight for the KeccakHashes batch resource + +**Example setting the default value** (1): +``` +[Sequencer] +WeightKeccakHashes=1 +``` + +### 10.17. `Sequencer.WeightPoseidonHashes` + +**Type:** : `integer` + +**Default:** `1` + +**Description:** WeightPoseidonHashes is the cost weight for the PoseidonHashes batch resource + +**Example setting the default value** (1): +``` +[Sequencer] +WeightPoseidonHashes=1 +``` + +### 10.18. `Sequencer.WeightPoseidonPaddings` + +**Type:** : `integer` + +**Default:** `1` + +**Description:** WeightPoseidonPaddings is the cost weight for the PoseidonPaddings batch resource + +**Example setting the default value** (1): +``` +[Sequencer] +WeightPoseidonPaddings=1 +``` + +### 10.19. `Sequencer.WeightMemAligns` + +**Type:** : `integer` + +**Default:** `1` + +**Description:** WeightMemAligns is the cost weight for the MemAligns batch resource + +**Example setting the default value** (1): +``` +[Sequencer] +WeightMemAligns=1 +``` + +### 10.20. `Sequencer.WeightArithmetics` + +**Type:** : `integer` + +**Default:** `1` + +**Description:** WeightArithmetics is the cost weight for the Arithmetics batch resource + +**Example setting the default value** (1): +``` +[Sequencer] +WeightArithmetics=1 +``` + +### 10.21. `Sequencer.WeightBinaries` + +**Type:** : `integer` + +**Default:** `1` + +**Description:** WeightBinaries is the cost weight for the Binaries batch resource + +**Example setting the default value** (1): +``` +[Sequencer] +WeightBinaries=1 +``` + +### 10.22. `Sequencer.WeightSteps` + +**Type:** : `integer` + +**Default:** `1` + +**Description:** WeightSteps is the cost weight for the Steps batch resource + +**Example setting the default value** (1): +``` +[Sequencer] +WeightSteps=1 +``` + +### 10.23. 
`Sequencer.TxLifetimeCheckTimeout` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"10m0s"` + +**Description:** TxLifetimeCheckTimeout is the time the sequencer waits to check txs lifetime + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("10m0s"): +``` +[Sequencer] +TxLifetimeCheckTimeout="10m0s" +``` + +### 10.24. `Sequencer.MaxTxLifetime` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"3h0m0s"` + +**Description:** MaxTxLifetime is the time a tx can be in the sequencer memory + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("3h0m0s"): +``` +[Sequencer] +MaxTxLifetime="3h0m0s" +``` + +### 10.25. `[Sequencer.Finalizer]` + +**Type:** : `object` +**Description:** Finalizer's specific config properties + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------------------------------------------------------------------------------------------ | ------- | ------- | ---------- | ---------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| - [GERDeadlineTimeout](#Sequencer_Finalizer_GERDeadlineTimeout ) | No | string | No | - | Duration | +| - [ForcedBatchDeadlineTimeout](#Sequencer_Finalizer_ForcedBatchDeadlineTimeout ) | No | string | No | - | Duration | +| - [SleepDuration](#Sequencer_Finalizer_SleepDuration ) | No | string | No | - | Duration | +| - [ResourcePercentageToCloseBatch](#Sequencer_Finalizer_ResourcePercentageToCloseBatch ) | No | integer | No | - | ResourcePercentageToCloseBatch is the percentage window of the resource left out for the batch to be closed | +| - [GERFinalityNumberOfBlocks](#Sequencer_Finalizer_GERFinalityNumberOfBlocks ) | No | integer | No | - | 
GERFinalityNumberOfBlocks is number of blocks to consider GER final | +| - [ClosingSignalsManagerWaitForCheckingL1Timeout](#Sequencer_Finalizer_ClosingSignalsManagerWaitForCheckingL1Timeout ) | No | string | No | - | Duration | +| - [ClosingSignalsManagerWaitForCheckingGER](#Sequencer_Finalizer_ClosingSignalsManagerWaitForCheckingGER ) | No | string | No | - | Duration | +| - [ClosingSignalsManagerWaitForCheckingForcedBatches](#Sequencer_Finalizer_ClosingSignalsManagerWaitForCheckingForcedBatches ) | No | string | No | - | Duration | +| - [ForcedBatchesFinalityNumberOfBlocks](#Sequencer_Finalizer_ForcedBatchesFinalityNumberOfBlocks ) | No | integer | No | - | ForcedBatchesFinalityNumberOfBlocks is number of blocks to consider GER final | +| - [TimestampResolution](#Sequencer_Finalizer_TimestampResolution ) | No | string | No | - | Duration | +| - [StopSequencerOnBatchNum](#Sequencer_Finalizer_StopSequencerOnBatchNum ) | No | integer | No | - | StopSequencerOnBatchNum specifies the batch number where the Sequencer will stop to process more transactions and generate new batches. The Sequencer will halt after it closes the batch equal to this number | + +#### 10.25.1. `Sequencer.Finalizer.GERDeadlineTimeout` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"5s"` + +**Description:** GERDeadlineTimeout is the time the finalizer waits after receiving closing signal to update Global Exit Root + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("5s"): +``` +[Sequencer.Finalizer] +GERDeadlineTimeout="5s" +``` + +#### 10.25.2. 
`Sequencer.Finalizer.ForcedBatchDeadlineTimeout` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"1m0s"` + +**Description:** ForcedBatchDeadlineTimeout is the time the finalizer waits after receiving closing signal to process Forced Batches + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("1m0s"): +``` +[Sequencer.Finalizer] +ForcedBatchDeadlineTimeout="1m0s" +``` + +#### 10.25.3. `Sequencer.Finalizer.SleepDuration` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"100ms"` + +**Description:** SleepDuration is the time the finalizer sleeps between each iteration, if there are no transactions to be processed + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("100ms"): +``` +[Sequencer.Finalizer] +SleepDuration="100ms" +``` + +#### 10.25.4. `Sequencer.Finalizer.ResourcePercentageToCloseBatch` + +**Type:** : `integer` + +**Default:** `10` + +**Description:** ResourcePercentageToCloseBatch is the percentage window of the resource left out for the batch to be closed + +**Example setting the default value** (10): +``` +[Sequencer.Finalizer] +ResourcePercentageToCloseBatch=10 +``` + +#### 10.25.5. `Sequencer.Finalizer.GERFinalityNumberOfBlocks` + +**Type:** : `integer` + +**Default:** `64` + +**Description:** GERFinalityNumberOfBlocks is number of blocks to consider GER final + +**Example setting the default value** (64): +``` +[Sequencer.Finalizer] +GERFinalityNumberOfBlocks=64 +``` + +#### 10.25.6. 
`Sequencer.Finalizer.ClosingSignalsManagerWaitForCheckingL1Timeout` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"10s"` + +**Description:** ClosingSignalsManagerWaitForCheckingL1Timeout is used by the closing signals manager to wait for its operation + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("10s"): +``` +[Sequencer.Finalizer] +ClosingSignalsManagerWaitForCheckingL1Timeout="10s" +``` + +#### 10.25.7. `Sequencer.Finalizer.ClosingSignalsManagerWaitForCheckingGER` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"10s"` + +**Description:** ClosingSignalsManagerWaitForCheckingGER is used by the closing signals manager to wait for its operation + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("10s"): +``` +[Sequencer.Finalizer] +ClosingSignalsManagerWaitForCheckingGER="10s" +``` + +#### 10.25.8. `Sequencer.Finalizer.ClosingSignalsManagerWaitForCheckingForcedBatches` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"10s"` + +**Description:** ClosingSignalsManagerWaitForCheckingForcedBatches is used by the closing signals manager to wait for its operation + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("10s"): +``` +[Sequencer.Finalizer] +ClosingSignalsManagerWaitForCheckingForcedBatches="10s" +``` + +#### 10.25.9. `Sequencer.Finalizer.ForcedBatchesFinalityNumberOfBlocks` + +**Type:** : `integer` + +**Default:** `64` + +**Description:** ForcedBatchesFinalityNumberOfBlocks is number of blocks to consider forced batches final + +**Example setting the default value** (64): +``` +[Sequencer.Finalizer] +ForcedBatchesFinalityNumberOfBlocks=64 +``` + +#### 10.25.10. 
`Sequencer.Finalizer.TimestampResolution` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"10s"` + +**Description:** TimestampResolution is the resolution of the timestamp used to close a batch + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("10s"): +``` +[Sequencer.Finalizer] +TimestampResolution="10s" +``` + +#### 10.25.11. `Sequencer.Finalizer.StopSequencerOnBatchNum` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** StopSequencerOnBatchNum specifies the batch number where the Sequencer will stop to process more transactions and generate new batches. The Sequencer will halt after it closes the batch equal to this number + +**Example setting the default value** (0): +``` +[Sequencer.Finalizer] +StopSequencerOnBatchNum=0 +``` + +### 10.26. `[Sequencer.DBManager]` + +**Type:** : `object` +**Description:** DBManager's specific config properties + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ---------------------------------------------------------------------------- | ------- | ------ | ---------- | ---------- | ----------------- | +| - [PoolRetrievalInterval](#Sequencer_DBManager_PoolRetrievalInterval ) | No | string | No | - | Duration | +| - [L2ReorgRetrievalInterval](#Sequencer_DBManager_L2ReorgRetrievalInterval ) | No | string | No | - | Duration | + +#### 10.26.1. `Sequencer.DBManager.PoolRetrievalInterval` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"500ms"` + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("500ms"): +``` +[Sequencer.DBManager] +PoolRetrievalInterval="500ms" +``` + +#### 10.26.2. 
`Sequencer.DBManager.L2ReorgRetrievalInterval` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"5s"` + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("5s"): +``` +[Sequencer.DBManager] +L2ReorgRetrievalInterval="5s" +``` + +### 10.27. `[Sequencer.Worker]` + +**Type:** : `object` +**Description:** Worker's specific config properties + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| --------------------------------------------------------------------- | ------- | ------ | ---------- | ---------- | -------------------------------------------------------------- | +| - [ResourceCostMultiplier](#Sequencer_Worker_ResourceCostMultiplier ) | No | number | No | - | ResourceCostMultiplier is the multiplier for the resource cost | + +#### 10.27.1. `Sequencer.Worker.ResourceCostMultiplier` + +**Type:** : `number` + +**Default:** `1000` + +**Description:** ResourceCostMultiplier is the multiplier for the resource cost + +**Example setting the default value** (1000): +``` +[Sequencer.Worker] +ResourceCostMultiplier=1000 +``` + +### 10.28. 
`[Sequencer.EffectiveGasPrice]` + +**Type:** : `object` +**Description:** EffectiveGasPrice is the config for the gas price + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------------------------------------------------------------------------------ | ------- | ------- | ---------- | ---------- | ----------------------------------------------------------------------------------------------------------------------------------- | +| - [MaxBreakEvenGasPriceDeviationPercentage](#Sequencer_EffectiveGasPrice_MaxBreakEvenGasPriceDeviationPercentage ) | No | integer | No | - | MaxBreakEvenGasPriceDeviationPercentage is the max allowed deviation percentage BreakEvenGasPrice on re-calculation | +| - [L1GasPriceFactor](#Sequencer_EffectiveGasPrice_L1GasPriceFactor ) | No | number | No | - | L1GasPriceFactor is the percentage of the L1 gas price that will be used as the L2 min gas price | +| - [ByteGasCost](#Sequencer_EffectiveGasPrice_ByteGasCost ) | No | integer | No | - | ByteGasCost is the gas cost per byte | +| - [MarginFactor](#Sequencer_EffectiveGasPrice_MarginFactor ) | No | number | No | - | MarginFactor is the margin factor percentage to be added to the L2 min gas price | +| - [Enabled](#Sequencer_EffectiveGasPrice_Enabled ) | No | boolean | No | - | Enabled is a flag to enable/disable the effective gas price | +| - [DefaultMinGasPriceAllowed](#Sequencer_EffectiveGasPrice_DefaultMinGasPriceAllowed ) | No | integer | No | - | DefaultMinGasPriceAllowed is the default min gas price to suggest
This value is assigned from [Pool].DefaultMinGasPriceAllowed | + +#### 10.28.1. `Sequencer.EffectiveGasPrice.MaxBreakEvenGasPriceDeviationPercentage` + +**Type:** : `integer` + +**Default:** `10` + +**Description:** MaxBreakEvenGasPriceDeviationPercentage is the max allowed deviation percentage BreakEvenGasPrice on re-calculation + +**Example setting the default value** (10): +``` +[Sequencer.EffectiveGasPrice] +MaxBreakEvenGasPriceDeviationPercentage=10 +``` + +#### 10.28.2. `Sequencer.EffectiveGasPrice.L1GasPriceFactor` + +**Type:** : `number` + +**Default:** `0.25` + +**Description:** L1GasPriceFactor is the percentage of the L1 gas price that will be used as the L2 min gas price + +**Example setting the default value** (0.25): +``` +[Sequencer.EffectiveGasPrice] +L1GasPriceFactor=0.25 +``` + +#### 10.28.3. `Sequencer.EffectiveGasPrice.ByteGasCost` + +**Type:** : `integer` + +**Default:** `16` + +**Description:** ByteGasCost is the gas cost per byte + +**Example setting the default value** (16): +``` +[Sequencer.EffectiveGasPrice] +ByteGasCost=16 +``` + +#### 10.28.4. `Sequencer.EffectiveGasPrice.MarginFactor` + +**Type:** : `number` + +**Default:** `1` + +**Description:** MarginFactor is the margin factor percentage to be added to the L2 min gas price + +**Example setting the default value** (1): +``` +[Sequencer.EffectiveGasPrice] +MarginFactor=1 +``` + +#### 10.28.5. `Sequencer.EffectiveGasPrice.Enabled` + +**Type:** : `boolean` + +**Default:** `false` + +**Description:** Enabled is a flag to enable/disable the effective gas price + +**Example setting the default value** (false): +``` +[Sequencer.EffectiveGasPrice] +Enabled=false +``` + +#### 10.28.6. 
`Sequencer.EffectiveGasPrice.DefaultMinGasPriceAllowed` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** DefaultMinGasPriceAllowed is the default min gas price to suggest +This value is assigned from [Pool].DefaultMinGasPriceAllowed + +**Example setting the default value** (0): +``` +[Sequencer.EffectiveGasPrice] +DefaultMinGasPriceAllowed=0 +``` + +## 11. `[SequenceSender]` + +**Type:** : `object` +**Description:** Configuration of the sequence sender service + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------------------------------------------------------------------- | ------- | --------------- | ---------- | ---------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| - [WaitPeriodSendSequence](#SequenceSender_WaitPeriodSendSequence ) | No | string | No | - | Duration | +| - [LastBatchVirtualizationTimeMaxWaitPeriod](#SequenceSender_LastBatchVirtualizationTimeMaxWaitPeriod ) | No | string | No | - | Duration | +| - [MaxTxSizeForL1](#SequenceSender_MaxTxSizeForL1 ) | No | integer | No | - | MaxTxSizeForL1 is the maximum size a single transaction can have. This field has
non-trivial consequences: larger transactions than 128KB are significantly harder and
more expensive to propagate; larger transactions also take more resources
to validate whether they fit into the pool or not. | +| - [SenderAddress](#SequenceSender_SenderAddress ) | No | string | No | - | SenderAddress defines which private key the eth tx manager needs to use
to sign the L1 txs | +| - [PrivateKeys](#SequenceSender_PrivateKeys ) | No | array of object | No | - | PrivateKeys defines all the key store files that are going
to be read in order to provide the private keys to sign the L1 txs | +| - [ForkUpgradeBatchNumber](#SequenceSender_ForkUpgradeBatchNumber ) | No | integer | No | - | Batch number where there is a forkid change (fork upgrade) | + +### 11.1. `SequenceSender.WaitPeriodSendSequence` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"5s"` + +**Description:** WaitPeriodSendSequence is the time the sequencer waits until +trying to send a sequence to L1 + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("5s"): +``` +[SequenceSender] +WaitPeriodSendSequence="5s" +``` + +### 11.2. `SequenceSender.LastBatchVirtualizationTimeMaxWaitPeriod` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"5s"` + +**Description:** LastBatchVirtualizationTimeMaxWaitPeriod is time since sequences should be sent + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("5s"): +``` +[SequenceSender] +LastBatchVirtualizationTimeMaxWaitPeriod="5s" +``` + +### 11.3. `SequenceSender.MaxTxSizeForL1` + +**Type:** : `integer` + +**Default:** `131072` + +**Description:** MaxTxSizeForL1 is the maximum size a single transaction can have. This field has +non-trivial consequences: larger transactions than 128KB are significantly harder and +more expensive to propagate; larger transactions also take more resources +to validate whether they fit into the pool or not. + +**Example setting the default value** (131072): +``` +[SequenceSender] +MaxTxSizeForL1=131072 +``` + +### 11.4. 
`SequenceSender.SenderAddress` + +**Type:** : `string` + +**Default:** `"0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266"` + +**Description:** SenderAddress defines which private key the eth tx manager needs to use +to sign the L1 txs + +**Example setting the default value** ("0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266"): +``` +[SequenceSender] +SenderAddress="0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266" +``` + +### 11.5. `SequenceSender.PrivateKeys` + +**Type:** : `array of object` + +**Default:** `[{"Path": "/pk/sequencer.keystore", "Password": "testonly"}]` + +**Description:** PrivateKeys defines all the key store files that are going +to be read in order to provide the private keys to sign the L1 txs + +**Example setting the default value** ([{"Path": "/pk/sequencer.keystore", "Password": "testonly"}]): +``` +[SequenceSender] +PrivateKeys=[{"Path": "/pk/sequencer.keystore", "Password": "testonly"}] +``` + +| | Array restrictions | +| -------------------- | ------------------ | +| **Min items** | N/A | +| **Max items** | N/A | +| **Items unicity** | False | +| **Additional items** | False | +| **Tuple validation** | See below | + +| Each item of this array must be | Description | +| ------------------------------------------------------ | ------------------------------------------------------------------------------------ | +| [PrivateKeys items](#SequenceSender_PrivateKeys_items) | KeystoreFileConfig has all the information needed to load a private key from a k ... | + +#### 11.5.1. 
[SequenceSender.PrivateKeys.PrivateKeys items] + +**Type:** : `object` +**Description:** KeystoreFileConfig has all the information needed to load a private key from a key store file + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| --------------------------------------------------------- | ------- | ------ | ---------- | ---------- | ------------------------------------------------------ | +| - [Path](#SequenceSender_PrivateKeys_items_Path ) | No | string | No | - | Path is the file path for the key store file | +| - [Password](#SequenceSender_PrivateKeys_items_Password ) | No | string | No | - | Password is the password to decrypt the key store file | + +##### 11.5.1.1. `SequenceSender.PrivateKeys.PrivateKeys items.Path` + +**Type:** : `string` +**Description:** Path is the file path for the key store file + +##### 11.5.1.2. `SequenceSender.PrivateKeys.PrivateKeys items.Password` + +**Type:** : `string` +**Description:** Password is the password to decrypt the key store file + +### 11.6. `SequenceSender.ForkUpgradeBatchNumber` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** Batch number where there is a forkid change (fork upgrade) + +**Example setting the default value** (0): +``` +[SequenceSender] +ForkUpgradeBatchNumber=0 +``` + +## 12. 
`[Aggregator]` + +**Type:** : `object` +**Description:** Configuration of the aggregator service + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| --------------------------------------------------------------------------------------------------- | ------- | ------- | ---------- | ---------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| - [Host](#Aggregator_Host ) | No | string | No | - | Host for the grpc server | +| - [Port](#Aggregator_Port ) | No | integer | No | - | Port for the grpc server | +| - [RetryTime](#Aggregator_RetryTime ) | No | string | No | - | Duration | +| - [VerifyProofInterval](#Aggregator_VerifyProofInterval ) | No | string | No | - | Duration | +| - [ProofStatePollingInterval](#Aggregator_ProofStatePollingInterval ) | No | string | No | - | Duration | +| - [TxProfitabilityCheckerType](#Aggregator_TxProfitabilityCheckerType ) | No | string | No | - | TxProfitabilityCheckerType type for checking is it profitable for aggregator to validate batch
possible values: base/acceptall | +| - [TxProfitabilityMinReward](#Aggregator_TxProfitabilityMinReward ) | No | object | No | - | TxProfitabilityMinReward min reward for base tx profitability checker when aggregator will validate batch
this parameter is used for the base tx profitability checker | +| - [IntervalAfterWhichBatchConsolidateAnyway](#Aggregator_IntervalAfterWhichBatchConsolidateAnyway ) | No | string | No | - | Duration | +| - [ChainID](#Aggregator_ChainID ) | No | integer | No | - | ChainID is the L2 ChainID provided by the Network Config | +| - [ForkId](#Aggregator_ForkId ) | No | integer | No | - | ForkID is the L2 ForkID provided by the Network Config | +| - [SenderAddress](#Aggregator_SenderAddress ) | No | string | No | - | SenderAddress defines which private key the eth tx manager needs to use
to sign the L1 txs | +| - [CleanupLockedProofsInterval](#Aggregator_CleanupLockedProofsInterval ) | No | string | No | - | Duration | +| - [GeneratingProofCleanupThreshold](#Aggregator_GeneratingProofCleanupThreshold ) | No | string | No | - | GeneratingProofCleanupThreshold represents the time interval after
which a proof in generating state is considered to be stuck and
allowed to be cleared. | + +### 12.1. `Aggregator.Host` + +**Type:** : `string` + +**Default:** `"0.0.0.0"` + +**Description:** Host for the grpc server + +**Example setting the default value** ("0.0.0.0"): +``` +[Aggregator] +Host="0.0.0.0" +``` + +### 12.2. `Aggregator.Port` + +**Type:** : `integer` + +**Default:** `50081` + +**Description:** Port for the grpc server + +**Example setting the default value** (50081): +``` +[Aggregator] +Port=50081 +``` + +### 12.3. `Aggregator.RetryTime` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"5s"` + +**Description:** RetryTime is the time the aggregator main loop sleeps if there are no proofs to aggregate +or batches to generate proofs. It is also used in the isSynced loop + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("5s"): +``` +[Aggregator] +RetryTime="5s" +``` + +### 12.4. `Aggregator.VerifyProofInterval` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"1m30s"` + +**Description:** VerifyProofInterval is the interval of time to verify/send an proof in L1 + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("1m30s"): +``` +[Aggregator] +VerifyProofInterval="1m30s" +``` + +### 12.5. `Aggregator.ProofStatePollingInterval` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"5s"` + +**Description:** ProofStatePollingInterval is the interval time to polling the prover about the generation state of a proof + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("5s"): +``` +[Aggregator] +ProofStatePollingInterval="5s" +``` + +### 12.6. 
`Aggregator.TxProfitabilityCheckerType` + +**Type:** : `string` + +**Default:** `"acceptall"` + +**Description:** TxProfitabilityCheckerType type for checking if it is profitable for aggregator to validate batch +possible values: base/acceptall + +**Example setting the default value** ("acceptall"): +``` +[Aggregator] +TxProfitabilityCheckerType="acceptall" +``` + +### 12.7. `[Aggregator.TxProfitabilityMinReward]` + +**Type:** : `object` +**Description:** TxProfitabilityMinReward min reward for base tx profitability checker when aggregator will validate batch +this parameter is used for the base tx profitability checker + +### 12.8. `Aggregator.IntervalAfterWhichBatchConsolidateAnyway` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"0s"` + +**Description:** IntervalAfterWhichBatchConsolidateAnyway is the interval after which the main sequencer will consolidate a batch anyway, even if there are no transactions + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("0s"): +``` +[Aggregator] +IntervalAfterWhichBatchConsolidateAnyway="0s" +``` + +### 12.9. `Aggregator.ChainID` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** ChainID is the L2 ChainID provided by the Network Config + +**Example setting the default value** (0): +``` +[Aggregator] +ChainID=0 +``` + +### 12.10. `Aggregator.ForkId` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** ForkID is the L2 ForkID provided by the Network Config + +**Example setting the default value** (0): +``` +[Aggregator] +ForkId=0 +``` + +### 12.11. `Aggregator.SenderAddress` + +**Type:** : `string` + +**Default:** `""` + +**Description:** SenderAddress defines which private key the eth tx manager needs to use +to sign the L1 txs + +**Example setting the default value** (""): +``` +[Aggregator] +SenderAddress="" +``` + +### 12.12. 
`Aggregator.CleanupLockedProofsInterval` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"2m0s"` + +**Description:** CleanupLockedProofsInterval is the interval of time to clean up locked proofs. + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("2m0s"): +``` +[Aggregator] +CleanupLockedProofsInterval="2m0s" +``` + +### 12.13. `Aggregator.GeneratingProofCleanupThreshold` + +**Type:** : `string` + +**Default:** `"10m"` + +**Description:** GeneratingProofCleanupThreshold represents the time interval after +which a proof in generating state is considered to be stuck and +allowed to be cleared. + +**Example setting the default value** ("10m"): +``` +[Aggregator] +GeneratingProofCleanupThreshold="10m" +``` + +## 13. `[NetworkConfig]` + +**Type:** : `object` +**Description:** Configuration of the genesis of the network. This is used to known the initial state of the network + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ---------------------------------------------------------------------------- | ------- | ---------------- | ---------- | ---------- | ----------------------------------------------------------------------------------- | +| - [l1Config](#NetworkConfig_l1Config ) | No | object | No | - | L1: Configuration related to L1 | +| - [L2GlobalExitRootManagerAddr](#NetworkConfig_L2GlobalExitRootManagerAddr ) | No | array of integer | No | - | DEPRECATED L2: address of the \`PolygonZkEVMGlobalExitRootL2 proxy\` smart contract | +| - [L2BridgeAddr](#NetworkConfig_L2BridgeAddr ) | No | array of integer | No | - | L2: address of the \`PolygonZkEVMBridge proxy\` smart contract | +| - [Genesis](#NetworkConfig_Genesis ) | No | object | No | - | L1: Genesis of the rollup, first block number and root | + +### 13.1. 
`[NetworkConfig.l1Config]` + +**Type:** : `object` +**Description:** L1: Configuration related to L1 + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------------------------------------------------------------- | ------- | ---------------- | ---------- | ---------- | ------------------------------------------------ | +| - [chainId](#NetworkConfig_l1Config_chainId ) | No | integer | No | - | Chain ID of the L1 network | +| - [polygonZkEVMAddress](#NetworkConfig_l1Config_polygonZkEVMAddress ) | No | array of integer | No | - | Address of the L1 contract | +| - [maticTokenAddress](#NetworkConfig_l1Config_maticTokenAddress ) | No | array of integer | No | - | Address of the L1 Matic token Contract | +| - [polygonZkEVMGlobalExitRootAddress](#NetworkConfig_l1Config_polygonZkEVMGlobalExitRootAddress ) | No | array of integer | No | - | Address of the L1 GlobalExitRootManager contract | + +#### 13.1.1. `NetworkConfig.l1Config.chainId` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** Chain ID of the L1 network + +**Example setting the default value** (0): +``` +[NetworkConfig.l1Config] +chainId=0 +``` + +#### 13.1.2. `NetworkConfig.l1Config.polygonZkEVMAddress` + +**Type:** : `array of integer` +**Description:** Address of the L1 contract + +#### 13.1.3. `NetworkConfig.l1Config.maticTokenAddress` + +**Type:** : `array of integer` +**Description:** Address of the L1 Matic token Contract + +#### 13.1.4. `NetworkConfig.l1Config.polygonZkEVMGlobalExitRootAddress` + +**Type:** : `array of integer` +**Description:** Address of the L1 GlobalExitRootManager contract + +### 13.2. `NetworkConfig.L2GlobalExitRootManagerAddr` + +**Type:** : `array of integer` +**Description:** DEPRECATED L2: address of the `PolygonZkEVMGlobalExitRootL2 proxy` smart contract + +### 13.3. 
`NetworkConfig.L2BridgeAddr` + +**Type:** : `array of integer` +**Description:** L2: address of the `PolygonZkEVMBridge proxy` smart contract + +### 13.4. `[NetworkConfig.Genesis]` + +**Type:** : `object` +**Description:** L1: Genesis of the rollup, first block number and root + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------------------------ | ------- | ---------------- | ---------- | ---------- | --------------------------------------------------------------------------------- | +| - [GenesisBlockNum](#NetworkConfig_Genesis_GenesisBlockNum ) | No | integer | No | - | GenesisBlockNum is the block number where the polygonZKEVM smc was deployed on L1 | +| - [Root](#NetworkConfig_Genesis_Root ) | No | array of integer | No | - | Root hash of the genesis block | +| - [GenesisActions](#NetworkConfig_Genesis_GenesisActions ) | No | array of object | No | - | Contracts to be deployed to L2 | + +#### 13.4.1. `NetworkConfig.Genesis.GenesisBlockNum` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** GenesisBlockNum is the block number where the polygonZKEVM smc was deployed on L1 + +**Example setting the default value** (0): +``` +[NetworkConfig.Genesis] +GenesisBlockNum=0 +``` + +#### 13.4.2. `NetworkConfig.Genesis.Root` + +**Type:** : `array of integer` +**Description:** Root hash of the genesis block + +#### 13.4.3. 
`NetworkConfig.Genesis.GenesisActions` + +**Type:** : `array of object` +**Description:** Contracts to be deployed to L2 + +| | Array restrictions | +| -------------------- | ------------------ | +| **Min items** | N/A | +| **Max items** | N/A | +| **Items unicity** | False | +| **Additional items** | False | +| **Tuple validation** | See below | + +| Each item of this array must be | Description | +| ------------------------------------------------------------------- | ------------------------------------------------------------------------- | +| [GenesisActions items](#NetworkConfig_Genesis_GenesisActions_items) | GenesisAction represents one of the values set on the SMT during genesis. | + +##### 13.4.3.1. [NetworkConfig.Genesis.GenesisActions.GenesisActions items] + +**Type:** : `object` +**Description:** GenesisAction represents one of the values set on the SMT during genesis. + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| --------------------------------------------------------------------------------- | ------- | ------- | ---------- | ---------- | ----------------- | +| - [address](#NetworkConfig_Genesis_GenesisActions_items_address ) | No | string | No | - | - | +| - [type](#NetworkConfig_Genesis_GenesisActions_items_type ) | No | integer | No | - | - | +| - [storagePosition](#NetworkConfig_Genesis_GenesisActions_items_storagePosition ) | No | string | No | - | - | +| - [bytecode](#NetworkConfig_Genesis_GenesisActions_items_bytecode ) | No | string | No | - | - | +| - [key](#NetworkConfig_Genesis_GenesisActions_items_key ) | No | string | No | - | - | +| - [value](#NetworkConfig_Genesis_GenesisActions_items_value ) | No | string | No | - | - | +| - [root](#NetworkConfig_Genesis_GenesisActions_items_root ) | No | string | No | - | - | + +##### 13.4.3.1.1. `NetworkConfig.Genesis.GenesisActions.GenesisActions items.address` + +**Type:** : `string` + +##### 13.4.3.1.2. 
`NetworkConfig.Genesis.GenesisActions.GenesisActions items.type` + +**Type:** : `integer` + +##### 13.4.3.1.3. `NetworkConfig.Genesis.GenesisActions.GenesisActions items.storagePosition` + +**Type:** : `string` + +##### 13.4.3.1.4. `NetworkConfig.Genesis.GenesisActions.GenesisActions items.bytecode` + +**Type:** : `string` + +##### 13.4.3.1.5. `NetworkConfig.Genesis.GenesisActions.GenesisActions items.key` + +**Type:** : `string` + +##### 13.4.3.1.6. `NetworkConfig.Genesis.GenesisActions.GenesisActions items.value` + +**Type:** : `string` + +##### 13.4.3.1.7. `NetworkConfig.Genesis.GenesisActions.GenesisActions items.root` + +**Type:** : `string` + +## 14. `[L2GasPriceSuggester]` + +**Type:** : `object` +**Description:** Configuration of the gas price suggester service + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------------------------------------------ | ------- | ------- | ---------- | ---------- | ---------------------------------------------------------------------------------------------------------------------------------------- | +| - [Type](#L2GasPriceSuggester_Type ) | No | string | No | - | - | +| - [DefaultGasPriceWei](#L2GasPriceSuggester_DefaultGasPriceWei ) | No | integer | No | - | DefaultGasPriceWei is used to set the gas price to be used by the default gas pricer or as minimim gas price by the follower gas pricer. | +| - [MaxGasPriceWei](#L2GasPriceSuggester_MaxGasPriceWei ) | No | integer | No | - | MaxGasPriceWei is used to limit the gas price returned by the follower gas pricer to a maximum value. It is ignored if 0. 
| +| - [MaxPrice](#L2GasPriceSuggester_MaxPrice ) | No | object | No | - | - | +| - [IgnorePrice](#L2GasPriceSuggester_IgnorePrice ) | No | object | No | - | - | +| - [CheckBlocks](#L2GasPriceSuggester_CheckBlocks ) | No | integer | No | - | - | +| - [Percentile](#L2GasPriceSuggester_Percentile ) | No | integer | No | - | - | +| - [UpdatePeriod](#L2GasPriceSuggester_UpdatePeriod ) | No | string | No | - | Duration | +| - [CleanHistoryPeriod](#L2GasPriceSuggester_CleanHistoryPeriod ) | No | string | No | - | Duration | +| - [CleanHistoryTimeRetention](#L2GasPriceSuggester_CleanHistoryTimeRetention ) | No | string | No | - | Duration | +| - [Factor](#L2GasPriceSuggester_Factor ) | No | number | No | - | - | + +### 14.1. `L2GasPriceSuggester.Type` + +**Type:** : `string` + +**Default:** `"follower"` + +**Example setting the default value** ("follower"): +``` +[L2GasPriceSuggester] +Type="follower" +``` + +### 14.2. `L2GasPriceSuggester.DefaultGasPriceWei` + +**Type:** : `integer` + +**Default:** `2000000000` + +**Description:** DefaultGasPriceWei is used to set the gas price to be used by the default gas pricer or as minimim gas price by the follower gas pricer. + +**Example setting the default value** (2000000000): +``` +[L2GasPriceSuggester] +DefaultGasPriceWei=2000000000 +``` + +### 14.3. `L2GasPriceSuggester.MaxGasPriceWei` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** MaxGasPriceWei is used to limit the gas price returned by the follower gas pricer to a maximum value. It is ignored if 0. + +**Example setting the default value** (0): +``` +[L2GasPriceSuggester] +MaxGasPriceWei=0 +``` + +### 14.4. `[L2GasPriceSuggester.MaxPrice]` + +**Type:** : `object` + +### 14.5. `[L2GasPriceSuggester.IgnorePrice]` + +**Type:** : `object` + +### 14.6. `L2GasPriceSuggester.CheckBlocks` + +**Type:** : `integer` + +**Default:** `0` + +**Example setting the default value** (0): +``` +[L2GasPriceSuggester] +CheckBlocks=0 +``` + +### 14.7. 
`L2GasPriceSuggester.Percentile` + +**Type:** : `integer` + +**Default:** `0` + +**Example setting the default value** (0): +``` +[L2GasPriceSuggester] +Percentile=0 +``` + +### 14.8. `L2GasPriceSuggester.UpdatePeriod` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"10s"` + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("10s"): +``` +[L2GasPriceSuggester] +UpdatePeriod="10s" +``` + +### 14.9. `L2GasPriceSuggester.CleanHistoryPeriod` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"1h0m0s"` + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("1h0m0s"): +``` +[L2GasPriceSuggester] +CleanHistoryPeriod="1h0m0s" +``` + +### 14.10. `L2GasPriceSuggester.CleanHistoryTimeRetention` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"5m0s"` + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("5m0s"): +``` +[L2GasPriceSuggester] +CleanHistoryTimeRetention="5m0s" +``` + +### 14.11. `L2GasPriceSuggester.Factor` + +**Type:** : `number` + +**Default:** `0.15` + +**Example setting the default value** (0.15): +``` +[L2GasPriceSuggester] +Factor=0.15 +``` + +## 15. 
`[Executor]` + +**Type:** : `object` +**Description:** Configuration of the executor service + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------------------------------------- | ------- | ------- | ---------- | ---------- | ----------------------------------------------------------------------------------------------------------------------- | +| - [URI](#Executor_URI ) | No | string | No | - | - | +| - [MaxResourceExhaustedAttempts](#Executor_MaxResourceExhaustedAttempts ) | No | integer | No | - | MaxResourceExhaustedAttempts is the max number of attempts to make a transaction succeed because of resource exhaustion | +| - [WaitOnResourceExhaustion](#Executor_WaitOnResourceExhaustion ) | No | string | No | - | Duration | +| - [MaxGRPCMessageSize](#Executor_MaxGRPCMessageSize ) | No | integer | No | - | - | + +### 15.1. `Executor.URI` + +**Type:** : `string` + +**Default:** `"zkevm-prover:50071"` + +**Example setting the default value** ("zkevm-prover:50071"): +``` +[Executor] +URI="zkevm-prover:50071" +``` + +### 15.2. `Executor.MaxResourceExhaustedAttempts` + +**Type:** : `integer` + +**Default:** `3` + +**Description:** MaxResourceExhaustedAttempts is the max number of attempts to make a transaction succeed because of resource exhaustion + +**Example setting the default value** (3): +``` +[Executor] +MaxResourceExhaustedAttempts=3 +``` + +### 15.3. `Executor.WaitOnResourceExhaustion` + +**Title:** Duration + +**Type:** : `string` + +**Default:** `"1s"` + +**Description:** WaitOnResourceExhaustion is the time to wait before retrying a transaction because of resource exhaustion + +**Examples:** + +```json +"1m" +``` + +```json +"300ms" +``` + +**Example setting the default value** ("1s"): +``` +[Executor] +WaitOnResourceExhaustion="1s" +``` + +### 15.4. 
`Executor.MaxGRPCMessageSize` + +**Type:** : `integer` + +**Default:** `100000000` + +**Example setting the default value** (100000000): +``` +[Executor] +MaxGRPCMessageSize=100000000 +``` + +## 16. `[MTClient]` + +**Type:** : `object` +**Description:** Configuration of the merkle tree client service. Not use in the node, only for testing + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ----------------------- | ------- | ------ | ---------- | ---------- | ---------------------- | +| - [URI](#MTClient_URI ) | No | string | No | - | URI is the server URI. | + +### 16.1. `MTClient.URI` + +**Type:** : `string` + +**Default:** `"zkevm-prover:50061"` + +**Description:** URI is the server URI. + +**Example setting the default value** ("zkevm-prover:50061"): +``` +[MTClient] +URI="zkevm-prover:50061" +``` + +## 17. `[StateDB]` + +**Type:** : `object` +**Description:** Configuration of the state database connection + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ---------------------------------- | ------- | ------- | ---------- | ---------- | ---------------------------------------------------------- | +| - [Name](#StateDB_Name ) | No | string | No | - | Database name | +| - [User](#StateDB_User ) | No | string | No | - | Database User name | +| - [Password](#StateDB_Password ) | No | string | No | - | Database Password of the user | +| - [Host](#StateDB_Host ) | No | string | No | - | Host address of database | +| - [Port](#StateDB_Port ) | No | string | No | - | Port Number of database | +| - [EnableLog](#StateDB_EnableLog ) | No | boolean | No | - | EnableLog | +| - [MaxConns](#StateDB_MaxConns ) | No | integer | No | - | MaxConns is the maximum number of connections in the pool. | + +### 17.1. `StateDB.Name` + +**Type:** : `string` + +**Default:** `"state_db"` + +**Description:** Database name + +**Example setting the default value** ("state_db"): +``` +[StateDB] +Name="state_db" +``` + +### 17.2. 
`StateDB.User` + +**Type:** : `string` + +**Default:** `"state_user"` + +**Description:** Database User name + +**Example setting the default value** ("state_user"): +``` +[StateDB] +User="state_user" +``` + +### 17.3. `StateDB.Password` + +**Type:** : `string` + +**Default:** `"state_password"` + +**Description:** Database Password of the user + +**Example setting the default value** ("state_password"): +``` +[StateDB] +Password="state_password" +``` + +### 17.4. `StateDB.Host` + +**Type:** : `string` + +**Default:** `"zkevm-state-db"` + +**Description:** Host address of database + +**Example setting the default value** ("zkevm-state-db"): +``` +[StateDB] +Host="zkevm-state-db" +``` + +### 17.5. `StateDB.Port` + +**Type:** : `string` + +**Default:** `"5432"` + +**Description:** Port Number of database + +**Example setting the default value** ("5432"): +``` +[StateDB] +Port="5432" +``` + +### 17.6. `StateDB.EnableLog` + +**Type:** : `boolean` + +**Default:** `false` + +**Description:** EnableLog + +**Example setting the default value** (false): +``` +[StateDB] +EnableLog=false +``` + +### 17.7. `StateDB.MaxConns` + +**Type:** : `integer` + +**Default:** `200` + +**Description:** MaxConns is the maximum number of connections in the pool. + +**Example setting the default value** (200): +``` +[StateDB] +MaxConns=200 +``` + +## 18. 
`[Metrics]` + +**Type:** : `object` +**Description:** Configuration of the metrics service, basically is where is going to publish the metrics + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| ------------------------------------------------ | ------- | ------- | ---------- | ---------- | ------------------------------------------------------------------- | +| - [Host](#Metrics_Host ) | No | string | No | - | Host is the address to bind the metrics server | +| - [Port](#Metrics_Port ) | No | integer | No | - | Port is the port to bind the metrics server | +| - [Enabled](#Metrics_Enabled ) | No | boolean | No | - | Enabled is the flag to enable/disable the metrics server | +| - [ProfilingHost](#Metrics_ProfilingHost ) | No | string | No | - | ProfilingHost is the address to bind the profiling server | +| - [ProfilingPort](#Metrics_ProfilingPort ) | No | integer | No | - | ProfilingPort is the port to bind the profiling server | +| - [ProfilingEnabled](#Metrics_ProfilingEnabled ) | No | boolean | No | - | ProfilingEnabled is the flag to enable/disable the profiling server | + +### 18.1. `Metrics.Host` + +**Type:** : `string` + +**Default:** `"0.0.0.0"` + +**Description:** Host is the address to bind the metrics server + +**Example setting the default value** ("0.0.0.0"): +``` +[Metrics] +Host="0.0.0.0" +``` + +### 18.2. `Metrics.Port` + +**Type:** : `integer` + +**Default:** `9091` + +**Description:** Port is the port to bind the metrics server + +**Example setting the default value** (9091): +``` +[Metrics] +Port=9091 +``` + +### 18.3. `Metrics.Enabled` + +**Type:** : `boolean` + +**Default:** `false` + +**Description:** Enabled is the flag to enable/disable the metrics server + +**Example setting the default value** (false): +``` +[Metrics] +Enabled=false +``` + +### 18.4. 
`Metrics.ProfilingHost` + +**Type:** : `string` + +**Default:** `""` + +**Description:** ProfilingHost is the address to bind the profiling server + +**Example setting the default value** (""): +``` +[Metrics] +ProfilingHost="" +``` + +### 18.5. `Metrics.ProfilingPort` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** ProfilingPort is the port to bind the profiling server + +**Example setting the default value** (0): +``` +[Metrics] +ProfilingPort=0 +``` + +### 18.6. `Metrics.ProfilingEnabled` + +**Type:** : `boolean` + +**Default:** `false` + +**Description:** ProfilingEnabled is the flag to enable/disable the profiling server + +**Example setting the default value** (false): +``` +[Metrics] +ProfilingEnabled=false +``` + +## 19. `[EventLog]` + +**Type:** : `object` +**Description:** Configuration of the event database connection + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| --------------------- | ------- | ------ | ---------- | ---------- | -------------------------------- | +| - [DB](#EventLog_DB ) | No | object | No | - | DB is the database configuration | + +### 19.1. 
`[EventLog.DB]` + +**Type:** : `object` +**Description:** DB is the database configuration + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| -------------------------------------- | ------- | ------- | ---------- | ---------- | ---------------------------------------------------------- | +| - [Name](#EventLog_DB_Name ) | No | string | No | - | Database name | +| - [User](#EventLog_DB_User ) | No | string | No | - | Database User name | +| - [Password](#EventLog_DB_Password ) | No | string | No | - | Database Password of the user | +| - [Host](#EventLog_DB_Host ) | No | string | No | - | Host address of database | +| - [Port](#EventLog_DB_Port ) | No | string | No | - | Port Number of database | +| - [EnableLog](#EventLog_DB_EnableLog ) | No | boolean | No | - | EnableLog | +| - [MaxConns](#EventLog_DB_MaxConns ) | No | integer | No | - | MaxConns is the maximum number of connections in the pool. | + +#### 19.1.1. `EventLog.DB.Name` + +**Type:** : `string` + +**Default:** `""` + +**Description:** Database name + +**Example setting the default value** (""): +``` +[EventLog.DB] +Name="" +``` + +#### 19.1.2. `EventLog.DB.User` + +**Type:** : `string` + +**Default:** `""` + +**Description:** Database User name + +**Example setting the default value** (""): +``` +[EventLog.DB] +User="" +``` + +#### 19.1.3. `EventLog.DB.Password` + +**Type:** : `string` + +**Default:** `""` + +**Description:** Database Password of the user + +**Example setting the default value** (""): +``` +[EventLog.DB] +Password="" +``` + +#### 19.1.4. `EventLog.DB.Host` + +**Type:** : `string` + +**Default:** `""` + +**Description:** Host address of database + +**Example setting the default value** (""): +``` +[EventLog.DB] +Host="" +``` + +#### 19.1.5. `EventLog.DB.Port` + +**Type:** : `string` + +**Default:** `""` + +**Description:** Port Number of database + +**Example setting the default value** (""): +``` +[EventLog.DB] +Port="" +``` + +#### 19.1.6. 
`EventLog.DB.EnableLog` + +**Type:** : `boolean` + +**Default:** `false` + +**Description:** EnableLog + +**Example setting the default value** (false): +``` +[EventLog.DB] +EnableLog=false +``` + +#### 19.1.7. `EventLog.DB.MaxConns` + +**Type:** : `integer` + +**Default:** `0` + +**Description:** MaxConns is the maximum number of connections in the pool. + +**Example setting the default value** (0): +``` +[EventLog.DB] +MaxConns=0 +``` + +## 20. `[HashDB]` + +**Type:** : `object` +**Description:** Configuration of the hash database connection + +| Property | Pattern | Type | Deprecated | Definition | Title/Description | +| --------------------------------- | ------- | ------- | ---------- | ---------- | ---------------------------------------------------------- | +| - [Name](#HashDB_Name ) | No | string | No | - | Database name | +| - [User](#HashDB_User ) | No | string | No | - | Database User name | +| - [Password](#HashDB_Password ) | No | string | No | - | Database Password of the user | +| - [Host](#HashDB_Host ) | No | string | No | - | Host address of database | +| - [Port](#HashDB_Port ) | No | string | No | - | Port Number of database | +| - [EnableLog](#HashDB_EnableLog ) | No | boolean | No | - | EnableLog | +| - [MaxConns](#HashDB_MaxConns ) | No | integer | No | - | MaxConns is the maximum number of connections in the pool. | + +### 20.1. `HashDB.Name` + +**Type:** : `string` + +**Default:** `"prover_db"` + +**Description:** Database name + +**Example setting the default value** ("prover_db"): +``` +[HashDB] +Name="prover_db" +``` + +### 20.2. `HashDB.User` + +**Type:** : `string` + +**Default:** `"prover_user"` + +**Description:** Database User name + +**Example setting the default value** ("prover_user"): +``` +[HashDB] +User="prover_user" +``` + +### 20.3. 
`HashDB.Password` + +**Type:** : `string` + +**Default:** `"prover_pass"` + +**Description:** Database Password of the user + +**Example setting the default value** ("prover_pass"): +``` +[HashDB] +Password="prover_pass" +``` + +### 20.4. `HashDB.Host` + +**Type:** : `string` + +**Default:** `"zkevm-state-db"` + +**Description:** Host address of database + +**Example setting the default value** ("zkevm-state-db"): +``` +[HashDB] +Host="zkevm-state-db" +``` + +### 20.5. `HashDB.Port` + +**Type:** : `string` + +**Default:** `"5432"` + +**Description:** Port Number of database + +**Example setting the default value** ("5432"): +``` +[HashDB] +Port="5432" +``` + +### 20.6. `HashDB.EnableLog` + +**Type:** : `boolean` + +**Default:** `false` + +**Description:** EnableLog + +**Example setting the default value** (false): +``` +[HashDB] +EnableLog=false +``` + +### 20.7. `HashDB.MaxConns` + +**Type:** : `integer` + +**Default:** `200` + +**Description:** MaxConns is the maximum number of connections in the pool. + +**Example setting the default value** (200): +``` +[HashDB] +MaxConns=200 +``` + +---------------------------------------------------------------------------------------------------------------------------- +Generated using [json-schema-for-humans](https://github.com/coveooss/json-schema-for-humans) diff --git a/docs/config-file/node-config-schema.json b/docs/config-file/node-config-schema.json new file mode 100644 index 0000000000..bed2336c99 --- /dev/null +++ b/docs/config-file/node-config-schema.json @@ -0,0 +1,1256 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "github.com/0xPolygonHermez/zkevm-node/config/config", + "properties": { + "IsTrustedSequencer": { + "type": "boolean", + "description": "This define is a trusted node (`true`) or a permission less (`false`). 
If you don't known\nset to `false`", + "default": false + }, + "ForkUpgradeBatchNumber": { + "type": "integer", + "description": "Last batch number before a forkid change (fork upgrade). That implies that\ngreater batch numbers are going to be trusted but no virtualized neither verified.\nSo after the batch number `ForkUpgradeBatchNumber` is virtualized and verified you could update\nthe system (SC,...) to new forkId and remove this value to allow the system to keep\nVirtualizing and verifying the new batchs.\nCheck issue [#2236](https://github.com/0xPolygonHermez/zkevm-node/issues/2236) to known more\nThis value overwrite `SequenceSender.ForkUpgradeBatchNumber`", + "default": 0 + }, + "ForkUpgradeNewForkId": { + "type": "integer", + "description": "Which is the new forkId", + "default": 0 + }, + "Log": { + "properties": { + "Environment": { + "type": "string", + "enum": [ + "production", + "development" + ], + "description": "Environment defining the log format (\"production\" or \"development\").\nIn development mode enables development mode (which makes DPanicLevel logs panic), uses a console encoder, writes to standard error, and disables sampling. Stacktraces are automatically included on logs of WarnLevel and above.\nCheck [here](https://pkg.go.dev/go.uber.org/zap@v1.24.0#NewDevelopmentConfig)", + "default": "development" + }, + "Level": { + "type": "string", + "enum": [ + "debug", + "info", + "warn", + "error", + "dpanic", + "panic", + "fatal" + ], + "description": "Level of log. 
As lower value more logs are going to be generated", + "default": "info" + }, + "Outputs": { + "items": { + "type": "string" + }, + "type": "array", + "description": "Outputs", + "default": [ + "stderr" + ] + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configure Log level for all the services, allow also to store the logs in a file" + }, + "Etherman": { + "properties": { + "URL": { + "type": "string", + "description": "URL is the URL of the Ethereum node for L1", + "default": "http://localhost:8545" + }, + "MultiGasProvider": { + "type": "boolean", + "description": "allow that L1 gas price calculation use multiples sources", + "default": false + }, + "Etherscan": { + "properties": { + "ApiKey": { + "type": "string", + "description": "Need API key to use etherscan, if it's empty etherscan is not used", + "default": "" + }, + "Url": { + "type": "string", + "description": "URL of the etherscan API. Overwritten with a hardcoded URL: \"https://api.etherscan.io/api?module=gastracker\u0026action=gasoracle\u0026apikey=\"", + "default": "" + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration for use Etherscan as used as gas provider, basically it needs the API-KEY" + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration of the etherman (client for access L1)" + }, + "EthTxManager": { + "properties": { + "FrequencyToMonitorTxs": { + "type": "string", + "title": "Duration", + "description": "FrequencyToMonitorTxs frequency of the resending failed txs", + "default": "1s", + "examples": [ + "1m", + "300ms" + ] + }, + "WaitTxToBeMined": { + "type": "string", + "title": "Duration", + "description": "WaitTxToBeMined time to wait after transaction was sent to the ethereum", + "default": "2m0s", + "examples": [ + "1m", + "300ms" + ] + }, + "PrivateKeys": { + "items": { + "properties": { + "Path": { + "type": "string", + "description": "Path is the file path for the key 
store file" + }, + "Password": { + "type": "string", + "description": "Password is the password to decrypt the key store file" + } + }, + "additionalProperties": false, + "type": "object", + "description": "KeystoreFileConfig has all the information needed to load a private key from a key store file" + }, + "type": "array", + "description": "PrivateKeys defines all the key store files that are going\nto be read in order to provide the private keys to sign the L1 txs" + }, + "ForcedGas": { + "type": "integer", + "description": "ForcedGas is the amount of gas to be forced in case of gas estimation error", + "default": 0 + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration for ethereum transaction manager" + }, + "Pool": { + "properties": { + "IntervalToRefreshBlockedAddresses": { + "type": "string", + "title": "Duration", + "description": "IntervalToRefreshBlockedAddresses is the time it takes to sync the\nblocked address list from db to memory", + "default": "5m0s", + "examples": [ + "1m", + "300ms" + ] + }, + "IntervalToRefreshGasPrices": { + "type": "string", + "title": "Duration", + "description": "IntervalToRefreshGasPrices is the time to wait to refresh the gas prices", + "default": "5s", + "examples": [ + "1m", + "300ms" + ] + }, + "MaxTxBytesSize": { + "type": "integer", + "description": "MaxTxBytesSize is the max size of a transaction in bytes", + "default": 100132 + }, + "MaxTxDataBytesSize": { + "type": "integer", + "description": "MaxTxDataBytesSize is the max size of the data field of a transaction in bytes", + "default": 100000 + }, + "DB": { + "properties": { + "Name": { + "type": "string", + "description": "Database name", + "default": "pool_db" + }, + "User": { + "type": "string", + "description": "Database User name", + "default": "pool_user" + }, + "Password": { + "type": "string", + "description": "Database Password of the user", + "default": "pool_password" + }, + "Host": { + "type": "string", + 
"description": "Host address of database", + "default": "zkevm-pool-db" + }, + "Port": { + "type": "string", + "description": "Port Number of database", + "default": "5432" + }, + "EnableLog": { + "type": "boolean", + "description": "EnableLog", + "default": false + }, + "MaxConns": { + "type": "integer", + "description": "MaxConns is the maximum number of connections in the pool.", + "default": 200 + } + }, + "additionalProperties": false, + "type": "object", + "description": "DB is the database configuration" + }, + "DefaultMinGasPriceAllowed": { + "type": "integer", + "description": "DefaultMinGasPriceAllowed is the default min gas price to suggest", + "default": 1000000000 + }, + "MinAllowedGasPriceInterval": { + "type": "string", + "title": "Duration", + "description": "MinAllowedGasPriceInterval is the interval to look back of the suggested min gas price for a tx", + "default": "5m0s", + "examples": [ + "1m", + "300ms" + ] + }, + "PollMinAllowedGasPriceInterval": { + "type": "string", + "title": "Duration", + "description": "PollMinAllowedGasPriceInterval is the interval to poll the suggested min gas price for a tx", + "default": "15s", + "examples": [ + "1m", + "300ms" + ] + }, + "AccountQueue": { + "type": "integer", + "description": "AccountQueue represents the maximum number of non-executable transaction slots permitted per account", + "default": 64 + }, + "GlobalQueue": { + "type": "integer", + "description": "GlobalQueue represents the maximum number of non-executable transaction slots for all accounts", + "default": 1024 + } + }, + "additionalProperties": false, + "type": "object", + "description": "Pool service configuration" + }, + "RPC": { + "properties": { + "Host": { + "type": "string", + "description": "Host defines the network adapter that will be used to serve the HTTP requests", + "default": "0.0.0.0" + }, + "Port": { + "type": "integer", + "description": "Port defines the port to serve the endpoints via HTTP", + "default": 8545 + }, + 
"ReadTimeout": { + "type": "string", + "title": "Duration", + "description": "ReadTimeout is the HTTP server read timeout\ncheck net/http.server.ReadTimeout and net/http.server.ReadHeaderTimeout", + "default": "1m0s", + "examples": [ + "1m", + "300ms" + ] + }, + "WriteTimeout": { + "type": "string", + "title": "Duration", + "description": "WriteTimeout is the HTTP server write timeout\ncheck net/http.server.WriteTimeout", + "default": "1m0s", + "examples": [ + "1m", + "300ms" + ] + }, + "MaxRequestsPerIPAndSecond": { + "type": "number", + "description": "MaxRequestsPerIPAndSecond defines how much requests a single IP can\nsend within a single second", + "default": 500 + }, + "SequencerNodeURI": { + "type": "string", + "description": "SequencerNodeURI is used allow Non-Sequencer nodes\nto relay transactions to the Sequencer node", + "default": "" + }, + "MaxCumulativeGasUsed": { + "type": "integer", + "description": "MaxCumulativeGasUsed is the max gas allowed per batch", + "default": 0 + }, + "WebSockets": { + "properties": { + "Enabled": { + "type": "boolean", + "description": "Enabled defines if the WebSocket requests are enabled or disabled", + "default": true + }, + "Host": { + "type": "string", + "description": "Host defines the network adapter that will be used to serve the WS requests", + "default": "0.0.0.0" + }, + "Port": { + "type": "integer", + "description": "Port defines the port to serve the endpoints via WS", + "default": 8546 + } + }, + "additionalProperties": false, + "type": "object", + "description": "WebSockets configuration" + }, + "EnableL2SuggestedGasPricePolling": { + "type": "boolean", + "description": "EnableL2SuggestedGasPricePolling enables polling of the L2 gas price to block tx in the RPC with lower gas price.", + "default": true + }, + "TraceBatchUseHTTPS": { + "type": "boolean", + "description": "TraceBatchUseHTTPS enables, in the debug_traceBatchByNum endpoint, the use of the HTTPS protocol (instead of HTTP)\nto do the parallel 
requests to RPC.debug_traceTransaction endpoint", + "default": true + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration for RPC service. THis one offers a extended Ethereum JSON-RPC API interface to interact with the node" + }, + "Synchronizer": { + "properties": { + "SyncInterval": { + "type": "string", + "title": "Duration", + "description": "SyncInterval is the delay interval between reading new rollup information", + "default": "1s", + "examples": [ + "1m", + "300ms" + ] + }, + "SyncChunkSize": { + "type": "integer", + "description": "SyncChunkSize is the number of blocks to sync on each chunk", + "default": 100 + }, + "TrustedSequencerURL": { + "type": "string", + "description": "TrustedSequencerURL is the rpc url to connect and sync the trusted state", + "default": "" + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration of service `Syncrhonizer`. For this service is also really important the value of `IsTrustedSequencer`\nbecause depending of this values is going to ask to a trusted node for trusted transactions or not" + }, + "Sequencer": { + "properties": { + "WaitPeriodPoolIsEmpty": { + "type": "string", + "title": "Duration", + "description": "WaitPeriodPoolIsEmpty is the time the sequencer waits until\ntrying to add new txs to the state", + "default": "1s", + "examples": [ + "1m", + "300ms" + ] + }, + "BlocksAmountForTxsToBeDeleted": { + "type": "integer", + "description": "BlocksAmountForTxsToBeDeleted is blocks amount after which txs will be deleted from the pool", + "default": 100 + }, + "FrequencyToCheckTxsForDelete": { + "type": "string", + "title": "Duration", + "description": "FrequencyToCheckTxsForDelete is frequency with which txs will be checked for deleting", + "default": "12h0m0s", + "examples": [ + "1m", + "300ms" + ] + }, + "MaxTxsPerBatch": { + "type": "integer", + "description": "MaxTxsPerBatch is the maximum amount of transactions in the batch", + 
"default": 300 + }, + "MaxBatchBytesSize": { + "type": "integer", + "description": "MaxBatchBytesSize is the maximum batch size in bytes\n(subtracted bits of all types.Sequence fields excluding BatchL2Data from MaxTxSizeForL1)", + "default": 120000 + }, + "MaxCumulativeGasUsed": { + "type": "integer", + "description": "MaxCumulativeGasUsed is max gas amount used by batch", + "default": 30000000 + }, + "MaxKeccakHashes": { + "type": "integer", + "description": "MaxKeccakHashes is max keccak hashes used by batch", + "default": 2145 + }, + "MaxPoseidonHashes": { + "type": "integer", + "description": "MaxPoseidonHashes is max poseidon hashes batch can handle", + "default": 252357 + }, + "MaxPoseidonPaddings": { + "type": "integer", + "description": "MaxPoseidonPaddings is max poseidon paddings batch can handle", + "default": 135191 + }, + "MaxMemAligns": { + "type": "integer", + "description": "MaxMemAligns is max mem aligns batch can handle", + "default": 236585 + }, + "MaxArithmetics": { + "type": "integer", + "description": "MaxArithmetics is max arithmetics batch can handle", + "default": 236585 + }, + "MaxBinaries": { + "type": "integer", + "description": "MaxBinaries is max binaries batch can handle", + "default": 473170 + }, + "MaxSteps": { + "type": "integer", + "description": "MaxSteps is max steps batch can handle", + "default": 7570538 + }, + "WeightBatchBytesSize": { + "type": "integer", + "description": "WeightBatchBytesSize is the cost weight for the BatchBytesSize batch resource", + "default": 1 + }, + "WeightCumulativeGasUsed": { + "type": "integer", + "description": "WeightCumulativeGasUsed is the cost weight for the CumulativeGasUsed batch resource", + "default": 1 + }, + "WeightKeccakHashes": { + "type": "integer", + "description": "WeightKeccakHashes is the cost weight for the KeccakHashes batch resource", + "default": 1 + }, + "WeightPoseidonHashes": { + "type": "integer", + "description": "WeightPoseidonHashes is the cost weight for the 
PoseidonHashes batch resource", + "default": 1 + }, + "WeightPoseidonPaddings": { + "type": "integer", + "description": "WeightPoseidonPaddings is the cost weight for the PoseidonPaddings batch resource", + "default": 1 + }, + "WeightMemAligns": { + "type": "integer", + "description": "WeightMemAligns is the cost weight for the MemAligns batch resource", + "default": 1 + }, + "WeightArithmetics": { + "type": "integer", + "description": "WeightArithmetics is the cost weight for the Arithmetics batch resource", + "default": 1 + }, + "WeightBinaries": { + "type": "integer", + "description": "WeightBinaries is the cost weight for the Binaries batch resource", + "default": 1 + }, + "WeightSteps": { + "type": "integer", + "description": "WeightSteps is the cost weight for the Steps batch resource", + "default": 1 + }, + "TxLifetimeCheckTimeout": { + "type": "string", + "title": "Duration", + "description": "TxLifetimeCheckTimeout is the time the sequencer waits to check txs lifetime", + "default": "10m0s", + "examples": [ + "1m", + "300ms" + ] + }, + "MaxTxLifetime": { + "type": "string", + "title": "Duration", + "description": "MaxTxLifetime is the time a tx can be in the sequencer memory", + "default": "3h0m0s", + "examples": [ + "1m", + "300ms" + ] + }, + "Finalizer": { + "properties": { + "GERDeadlineTimeout": { + "type": "string", + "title": "Duration", + "description": "GERDeadlineTimeout is the time the finalizer waits after receiving closing signal to update Global Exit Root", + "default": "5s", + "examples": [ + "1m", + "300ms" + ] + }, + "ForcedBatchDeadlineTimeout": { + "type": "string", + "title": "Duration", + "description": "ForcedBatchDeadlineTimeout is the time the finalizer waits after receiving closing signal to process Forced Batches", + "default": "1m0s", + "examples": [ + "1m", + "300ms" + ] + }, + "SleepDuration": { + "type": "string", + "title": "Duration", + "description": "SleepDuration is the time the finalizer sleeps between each iteration, if 
there are no transactions to be processed", + "default": "100ms", + "examples": [ + "1m", + "300ms" + ] + }, + "ResourcePercentageToCloseBatch": { + "type": "integer", + "description": "ResourcePercentageToCloseBatch is the percentage window of the resource left out for the batch to be closed", + "default": 10 + }, + "GERFinalityNumberOfBlocks": { + "type": "integer", + "description": "GERFinalityNumberOfBlocks is number of blocks to consider GER final", + "default": 64 + }, + "ClosingSignalsManagerWaitForCheckingL1Timeout": { + "type": "string", + "title": "Duration", + "description": "ClosingSignalsManagerWaitForCheckingL1Timeout is used by the closing signals manager to wait for its operation", + "default": "10s", + "examples": [ + "1m", + "300ms" + ] + }, + "ClosingSignalsManagerWaitForCheckingGER": { + "type": "string", + "title": "Duration", + "description": "ClosingSignalsManagerWaitForCheckingGER is used by the closing signals manager to wait for its operation", + "default": "10s", + "examples": [ + "1m", + "300ms" + ] + }, + "ClosingSignalsManagerWaitForCheckingForcedBatches": { + "type": "string", + "title": "Duration", + "description": "ClosingSignalsManagerWaitForCheckingL1Timeout is used by the closing signals manager to wait for its operation", + "default": "10s", + "examples": [ + "1m", + "300ms" + ] + }, + "ForcedBatchesFinalityNumberOfBlocks": { + "type": "integer", + "description": "ForcedBatchesFinalityNumberOfBlocks is number of blocks to consider GER final", + "default": 64 + }, + "TimestampResolution": { + "type": "string", + "title": "Duration", + "description": "TimestampResolution is the resolution of the timestamp used to close a batch", + "default": "10s", + "examples": [ + "1m", + "300ms" + ] + }, + "StopSequencerOnBatchNum": { + "type": "integer", + "description": "StopSequencerOnBatchNum specifies the batch number where the Sequencer will stop to process more transactions and generate new batches. 
The Sequencer will halt after it closes the batch equal to this number", + "default": 0 + } + }, + "additionalProperties": false, + "type": "object", + "description": "Finalizer's specific config properties" + }, + "DBManager": { + "properties": { + "PoolRetrievalInterval": { + "type": "string", + "title": "Duration", + "default": "500ms", + "examples": [ + "1m", + "300ms" + ] + }, + "L2ReorgRetrievalInterval": { + "type": "string", + "title": "Duration", + "default": "5s", + "examples": [ + "1m", + "300ms" + ] + } + }, + "additionalProperties": false, + "type": "object", + "description": "DBManager's specific config properties" + }, + "Worker": { + "properties": { + "ResourceCostMultiplier": { + "type": "number", + "description": "ResourceCostMultiplier is the multiplier for the resource cost", + "default": 1000 + } + }, + "additionalProperties": false, + "type": "object", + "description": "Worker's specific config properties" + }, + "EffectiveGasPrice": { + "properties": { + "MaxBreakEvenGasPriceDeviationPercentage": { + "type": "integer", + "description": "MaxBreakEvenGasPriceDeviationPercentage is the max allowed deviation percentage BreakEvenGasPrice on re-calculation", + "default": 10 + }, + "L1GasPriceFactor": { + "type": "number", + "description": "L1GasPriceFactor is the percentage of the L1 gas price that will be used as the L2 min gas price", + "default": 0.25 + }, + "ByteGasCost": { + "type": "integer", + "description": "ByteGasCost is the gas cost per byte", + "default": 16 + }, + "MarginFactor": { + "type": "number", + "description": "MarginFactor is the margin factor percentage to be added to the L2 min gas price", + "default": 1 + }, + "Enabled": { + "type": "boolean", + "description": "Enabled is a flag to enable/disable the effective gas price", + "default": false + }, + "DefaultMinGasPriceAllowed": { + "type": "integer", + "description": "DefaultMinGasPriceAllowed is the default min gas price to suggest\nThis value is assigned from 
[Pool].DefaultMinGasPriceAllowed", + "default": 0 + } + }, + "additionalProperties": false, + "type": "object", + "description": "EffectiveGasPrice is the config for the gas price" + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration of the sequencer service" + }, + "SequenceSender": { + "properties": { + "WaitPeriodSendSequence": { + "type": "string", + "title": "Duration", + "description": "WaitPeriodSendSequence is the time the sequencer waits until\ntrying to send a sequence to L1", + "default": "5s", + "examples": [ + "1m", + "300ms" + ] + }, + "LastBatchVirtualizationTimeMaxWaitPeriod": { + "type": "string", + "title": "Duration", + "description": "LastBatchVirtualizationTimeMaxWaitPeriod is time since sequences should be sent", + "default": "5s", + "examples": [ + "1m", + "300ms" + ] + }, + "MaxTxSizeForL1": { + "type": "integer", + "description": "MaxTxSizeForL1 is the maximum size a single transaction can have. This field has\nnon-trivial consequences: larger transactions than 128KB are significantly harder and\nmore expensive to propagate; larger transactions also take more resources\nto validate whether they fit into the pool or not.", + "default": 131072 + }, + "SenderAddress": { + "type": "string", + "description": "SenderAddress defines which private key the eth tx manager needs to use\nto sign the L1 txs", + "default": "0xf39fd6e51aad88f6f4ce6ab8827279cfffb92266" + }, + "PrivateKeys": { + "items": { + "properties": { + "Path": { + "type": "string", + "description": "Path is the file path for the key store file" + }, + "Password": { + "type": "string", + "description": "Password is the password to decrypt the key store file" + } + }, + "additionalProperties": false, + "type": "object", + "description": "KeystoreFileConfig has all the information needed to load a private key from a key store file" + }, + "type": "array", + "description": "PrivateKeys defines all the key store files that are going\nto be read in 
order to provide the private keys to sign the L1 txs", + "default": [ + { + "Path": "/pk/sequencer.keystore", + "Password": "testonly" + } + ] + }, + "ForkUpgradeBatchNumber": { + "type": "integer", + "description": "Batch number where there is a forkid change (fork upgrade)", + "default": 0 + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration of the sequence sender service" + }, + "Aggregator": { + "properties": { + "Host": { + "type": "string", + "description": "Host for the grpc server", + "default": "0.0.0.0" + }, + "Port": { + "type": "integer", + "description": "Port for the grpc server", + "default": 50081 + }, + "RetryTime": { + "type": "string", + "title": "Duration", + "description": "RetryTime is the time the aggregator main loop sleeps if there are no proofs to aggregate\nor batches to generate proofs. It is also used in the isSynced loop", + "default": "5s", + "examples": [ + "1m", + "300ms" + ] + }, + "VerifyProofInterval": { + "type": "string", + "title": "Duration", + "description": "VerifyProofInterval is the interval of time to verify/send an proof in L1", + "default": "1m30s", + "examples": [ + "1m", + "300ms" + ] + }, + "ProofStatePollingInterval": { + "type": "string", + "title": "Duration", + "description": "ProofStatePollingInterval is the interval time to polling the prover about the generation state of a proof", + "default": "5s", + "examples": [ + "1m", + "300ms" + ] + }, + "TxProfitabilityCheckerType": { + "type": "string", + "description": "TxProfitabilityCheckerType type for checking is it profitable for aggregator to validate batch\npossible values: base/acceptall", + "default": "acceptall" + }, + "TxProfitabilityMinReward": { + "properties": {}, + "additionalProperties": false, + "type": "object", + "description": "TxProfitabilityMinReward min reward for base tx profitability checker when aggregator will validate batch\nthis parameter is used for the base tx profitability checker" + }, + 
"IntervalAfterWhichBatchConsolidateAnyway": { + "type": "string", + "title": "Duration", + "description": "IntervalAfterWhichBatchConsolidateAnyway this is interval for the main sequencer, that will check if there is no transactions", + "default": "0s", + "examples": [ + "1m", + "300ms" + ] + }, + "ChainID": { + "type": "integer", + "description": "ChainID is the L2 ChainID provided by the Network Config", + "default": 0 + }, + "ForkId": { + "type": "integer", + "description": "ForkID is the L2 ForkID provided by the Network Config", + "default": 0 + }, + "SenderAddress": { + "type": "string", + "description": "SenderAddress defines which private key the eth tx manager needs to use\nto sign the L1 txs", + "default": "" + }, + "CleanupLockedProofsInterval": { + "type": "string", + "title": "Duration", + "description": "CleanupLockedProofsInterval is the interval of time to clean up locked proofs.", + "default": "2m0s", + "examples": [ + "1m", + "300ms" + ] + }, + "GeneratingProofCleanupThreshold": { + "type": "string", + "description": "GeneratingProofCleanupThreshold represents the time interval after\nwhich a proof in generating state is considered to be stuck and\nallowed to be cleared.", + "default": "10m" + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration of the aggregator service" + }, + "NetworkConfig": { + "properties": { + "l1Config": { + "properties": { + "chainId": { + "type": "integer", + "description": "Chain ID of the L1 network", + "default": 0 + }, + "polygonZkEVMAddress": { + "items": { + "type": "integer" + }, + "type": "array", + "maxItems": 20, + "minItems": 20, + "description": "Address of the L1 contract" + }, + "maticTokenAddress": { + "items": { + "type": "integer" + }, + "type": "array", + "maxItems": 20, + "minItems": 20, + "description": "Address of the L1 Matic token Contract" + }, + "polygonZkEVMGlobalExitRootAddress": { + "items": { + "type": "integer" + }, + "type": "array", + "maxItems": 20, 
+ "minItems": 20, + "description": "Address of the L1 GlobalExitRootManager contract" + } + }, + "additionalProperties": false, + "type": "object", + "description": "L1: Configuration related to L1" + }, + "L2GlobalExitRootManagerAddr": { + "items": { + "type": "integer" + }, + "type": "array", + "maxItems": 20, + "minItems": 20, + "description": "DEPRECATED L2: address of the `PolygonZkEVMGlobalExitRootL2 proxy` smart contract" + }, + "L2BridgeAddr": { + "items": { + "type": "integer" + }, + "type": "array", + "maxItems": 20, + "minItems": 20, + "description": "L2: address of the `PolygonZkEVMBridge proxy` smart contract" + }, + "Genesis": { + "properties": { + "GenesisBlockNum": { + "type": "integer", + "description": "GenesisBlockNum is the block number where the polygonZKEVM smc was deployed on L1", + "default": 0 + }, + "Root": { + "items": { + "type": "integer" + }, + "type": "array", + "maxItems": 32, + "minItems": 32, + "description": "Root hash of the genesis block" + }, + "GenesisActions": { + "items": { + "properties": { + "address": { + "type": "string" + }, + "type": { + "type": "integer" + }, + "storagePosition": { + "type": "string" + }, + "bytecode": { + "type": "string" + }, + "key": { + "type": "string" + }, + "value": { + "type": "string" + }, + "root": { + "type": "string" + } + }, + "additionalProperties": false, + "type": "object", + "description": "GenesisAction represents one of the values set on the SMT during genesis." + }, + "type": "array", + "description": "Contracts to be deployed to L2" + } + }, + "additionalProperties": false, + "type": "object", + "description": "L1: Genesis of the rollup, first block number and root" + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration of the genesis of the network. 
This is used to known the initial state of the network" + }, + "L2GasPriceSuggester": { + "properties": { + "Type": { + "type": "string", + "default": "follower" + }, + "DefaultGasPriceWei": { + "type": "integer", + "description": "DefaultGasPriceWei is used to set the gas price to be used by the default gas pricer or as minimim gas price by the follower gas pricer.", + "default": 2000000000 + }, + "MaxGasPriceWei": { + "type": "integer", + "description": "MaxGasPriceWei is used to limit the gas price returned by the follower gas pricer to a maximum value. It is ignored if 0.", + "default": 0 + }, + "MaxPrice": { + "properties": {}, + "additionalProperties": false, + "type": "object" + }, + "IgnorePrice": { + "properties": {}, + "additionalProperties": false, + "type": "object" + }, + "CheckBlocks": { + "type": "integer", + "default": 0 + }, + "Percentile": { + "type": "integer", + "default": 0 + }, + "UpdatePeriod": { + "type": "string", + "title": "Duration", + "default": "10s", + "examples": [ + "1m", + "300ms" + ] + }, + "CleanHistoryPeriod": { + "type": "string", + "title": "Duration", + "default": "1h0m0s", + "examples": [ + "1m", + "300ms" + ] + }, + "CleanHistoryTimeRetention": { + "type": "string", + "title": "Duration", + "default": "5m0s", + "examples": [ + "1m", + "300ms" + ] + }, + "Factor": { + "type": "number", + "default": 0.15 + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration of the gas price suggester service" + }, + "Executor": { + "properties": { + "URI": { + "type": "string", + "default": "zkevm-prover:50071" + }, + "MaxResourceExhaustedAttempts": { + "type": "integer", + "description": "MaxResourceExhaustedAttempts is the max number of attempts to make a transaction succeed because of resource exhaustion", + "default": 3 + }, + "WaitOnResourceExhaustion": { + "type": "string", + "title": "Duration", + "description": "WaitOnResourceExhaustion is the time to wait before retrying a transaction because 
of resource exhaustion", + "default": "1s", + "examples": [ + "1m", + "300ms" + ] + }, + "MaxGRPCMessageSize": { + "type": "integer", + "default": 100000000 + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration of the executor service" + }, + "MTClient": { + "properties": { + "URI": { + "type": "string", + "description": "URI is the server URI.", + "default": "zkevm-prover:50061" + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration of the merkle tree client service. Not use in the node, only for testing" + }, + "StateDB": { + "properties": { + "Name": { + "type": "string", + "description": "Database name", + "default": "state_db" + }, + "User": { + "type": "string", + "description": "Database User name", + "default": "state_user" + }, + "Password": { + "type": "string", + "description": "Database Password of the user", + "default": "state_password" + }, + "Host": { + "type": "string", + "description": "Host address of database", + "default": "zkevm-state-db" + }, + "Port": { + "type": "string", + "description": "Port Number of database", + "default": "5432" + }, + "EnableLog": { + "type": "boolean", + "description": "EnableLog", + "default": false + }, + "MaxConns": { + "type": "integer", + "description": "MaxConns is the maximum number of connections in the pool.", + "default": 200 + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration of the state database connection" + }, + "Metrics": { + "properties": { + "Host": { + "type": "string", + "description": "Host is the address to bind the metrics server", + "default": "0.0.0.0" + }, + "Port": { + "type": "integer", + "description": "Port is the port to bind the metrics server", + "default": 9091 + }, + "Enabled": { + "type": "boolean", + "description": "Enabled is the flag to enable/disable the metrics server", + "default": false + }, + "ProfilingHost": { + "type": "string", + "description": 
"ProfilingHost is the address to bind the profiling server", + "default": "" + }, + "ProfilingPort": { + "type": "integer", + "description": "ProfilingPort is the port to bind the profiling server", + "default": 0 + }, + "ProfilingEnabled": { + "type": "boolean", + "description": "ProfilingEnabled is the flag to enable/disable the profiling server", + "default": false + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration of the metrics service, basically is where is going to publish the metrics" + }, + "EventLog": { + "properties": { + "DB": { + "properties": { + "Name": { + "type": "string", + "description": "Database name", + "default": "" + }, + "User": { + "type": "string", + "description": "Database User name", + "default": "" + }, + "Password": { + "type": "string", + "description": "Database Password of the user", + "default": "" + }, + "Host": { + "type": "string", + "description": "Host address of database", + "default": "" + }, + "Port": { + "type": "string", + "description": "Port Number of database", + "default": "" + }, + "EnableLog": { + "type": "boolean", + "description": "EnableLog", + "default": false + }, + "MaxConns": { + "type": "integer", + "description": "MaxConns is the maximum number of connections in the pool.", + "default": 0 + } + }, + "additionalProperties": false, + "type": "object", + "description": "DB is the database configuration" + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration of the event database connection" + }, + "HashDB": { + "properties": { + "Name": { + "type": "string", + "description": "Database name", + "default": "prover_db" + }, + "User": { + "type": "string", + "description": "Database User name", + "default": "prover_user" + }, + "Password": { + "type": "string", + "description": "Database Password of the user", + "default": "prover_pass" + }, + "Host": { + "type": "string", + "description": "Host address of database", + "default": 
"zkevm-state-db" + }, + "Port": { + "type": "string", + "description": "Port Number of database", + "default": "5432" + }, + "EnableLog": { + "type": "boolean", + "description": "EnableLog", + "default": false + }, + "MaxConns": { + "type": "integer", + "description": "MaxConns is the maximum number of connections in the pool.", + "default": 200 + } + }, + "additionalProperties": false, + "type": "object", + "description": "Configuration of the hash database connection" + } + }, + "additionalProperties": false, + "type": "object", + "description": "Config represents the configuration of the entire Hermez Node The file is TOML format You could find some examples:\n\n[TOML format]: https://en.wikipedia.org/wiki/TOML" +} \ No newline at end of file diff --git a/docs/config-file/schema_doc.css b/docs/config-file/schema_doc.css new file mode 100644 index 0000000000..7896225bee --- /dev/null +++ b/docs/config-file/schema_doc.css @@ -0,0 +1,192 @@ +body { + font: 16px/1.5em "Overpass", "Open Sans", Helvetica, sans-serif; + color: #333; + font-weight: 300; + padding: 40px; +} + +.btn.btn-link { + font-size: 18px; + user-select: text; +} + +.jsfh-animated-property { + animation: eclair; + animation-iteration-count: 1; + animation-fill-mode: forwards; + animation-duration: .75s; + +} + +@keyframes eclair { + 0%,100% { + transform: scale(1); + } + 50% { + transform: scale(1.03); + } +} + +.btn.btn-primary { + margin: 10px; +} + +.btn.example-show.collapsed:before { + content: "show" +} + +.btn.example-show:before { + content: "hide" +} + +.description.collapse:not(.show) { + max-height: 100px !important; + overflow: hidden; + + display: -webkit-box; + -webkit-line-clamp: 2; + -webkit-box-orient: vertical; +} + +.description.collapsing { + min-height: 100px !important; +} + +.collapse-description-link.collapsed:after { + content: '+ Read More'; +} + +.collapse-description-link:not(.collapsed):after { + content: '- Read Less'; +} + +.badge { + font-size: 100%; + margin-bottom: 
0.5rem; + margin-top: 0.5rem; +} + +.badge.value-type { + font-size: 120%; + margin-left: 5px; + margin-right: 5px; + margin-bottom: 10px; +} + + +.badge.default-value { + font-size: 120%; + margin-left: 5px; + margin-bottom: 10px; +} + +.badge.restriction { + display: inline-block; +} + +.badge.required-property,.badge.deprecated-property,.badge.pattern-property,.badge.no-additional { + font-size: 100%; + margin-left: 10px; +} + +.accordion div.card:only-child { + border-bottom: 1px solid rgba(0, 0, 0, 0.125); +} + +.examples { + padding: 1rem !important; +} + +.examples pre { + margin-bottom: 0; +} + +.highlight.jumbotron { + padding: 1rem !important; +} + +.generated-by-footer { + margin-top: 1em; + text-align: right; +} + +.breadcrumbs { + font-family: monospace; + background: #e9ecef; + font-size: 100%; + margin-left: 10px; + font-weight: 500; + display: inline-block; + margin: 20px +} + +/* From https://github.com/richleland/pygments-css/blob/master/friendly.css, see https://github.com/trentm/python-markdown2/wiki/fenced-code-blocks */ +.highlight { background: #e9ecef; } /* Changed from #f0f0f0 in the original style to be the same as bootstrap's jumbotron */ +.highlight .hll { background-color: #ffffcc } +.highlight .c { color: #60a0b0; font-style: italic } /* Comment */ +.highlight .err { border: 1px solid #FF0000 } /* Error */ +.highlight .k { color: #007020; font-weight: bold } /* Keyword */ +.highlight .o { color: #666666 } /* Operator */ +.highlight .ch { color: #60a0b0; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #60a0b0; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #007020 } /* Comment.Preproc */ +.highlight .cpf { color: #60a0b0; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #60a0b0; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #60a0b0; background-color: #fff0f0 } /* Comment.Special */ +.highlight .gd { color: #A00000 } /* Generic.Deleted */ +.highlight 
.ge { font-style: italic } /* Generic.Emph */ +.highlight .gr { color: #FF0000 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #00A000 } /* Generic.Inserted */ +.highlight .go { color: #888888 } /* Generic.Output */ +.highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #0044DD } /* Generic.Traceback */ +.highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: #007020; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #007020 } /* Keyword.Pseudo */ +.highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #902000 } /* Keyword.Type */ +.highlight .m { color: #40a070 } /* Literal.Number */ +.highlight .s { color: #4070a0 } /* Literal.String */ +.highlight .na { color: #4070a0 } /* Name.Attribute */ +.highlight .nb { color: #007020 } /* Name.Builtin */ +.highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ +.highlight .no { color: #60add5 } /* Name.Constant */ +.highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ +.highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ +.highlight .ne { color: #007020 } /* Name.Exception */ +.highlight .nf { color: #06287e } /* Name.Function */ +.highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ +.highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ +.highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #bb60d5 } /* Name.Variable */ +.highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ +.highlight .w { color: #bbbbbb } /* Text.Whitespace */ 
+.highlight .mb { color: #40a070 } /* Literal.Number.Bin */ +.highlight .mf { color: #40a070 } /* Literal.Number.Float */ +.highlight .mh { color: #40a070 } /* Literal.Number.Hex */ +.highlight .mi { color: #40a070 } /* Literal.Number.Integer */ +.highlight .mo { color: #40a070 } /* Literal.Number.Oct */ +.highlight .sa { color: #4070a0 } /* Literal.String.Affix */ +.highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ +.highlight .sc { color: #4070a0 } /* Literal.String.Char */ +.highlight .dl { color: #4070a0 } /* Literal.String.Delimiter */ +.highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #4070a0 } /* Literal.String.Double */ +.highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ +.highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ +.highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ +.highlight .sx { color: #c65d09 } /* Literal.String.Other */ +.highlight .sr { color: #235388 } /* Literal.String.Regex */ +.highlight .s1 { color: #4070a0 } /* Literal.String.Single */ +.highlight .ss { color: #517918 } /* Literal.String.Symbol */ +.highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #06287e } /* Name.Function.Magic */ +.highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ +.highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ +.highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ +.highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */ +.highlight .il { color: #40a070 } /* Literal.Number.Integer.Long */ diff --git a/docs/config-file/schema_doc.min.js b/docs/config-file/schema_doc.min.js new file mode 100644 index 0000000000..17eceaf59a --- /dev/null +++ b/docs/config-file/schema_doc.min.js @@ -0,0 +1 @@ +$(document).on("click",'a[href^="#"]',function(event){event.preventDefault();history.pushState({},"",this.href)});function 
flashElement(elementId){myElement=document.getElementById(elementId);myElement.classList.add("jsfh-animated-property");setTimeout(function(){myElement.classList.remove("jsfh-animated-property")},1e3)}function setAnchor(anchorLinkDestination){history.pushState({},"",anchorLinkDestination)}function anchorOnLoad(){let linkTarget=decodeURIComponent(window.location.hash.split("?")[0].split("&")[0]);if(linkTarget[0]==="#"){linkTarget=linkTarget.substr(1)}if(linkTarget.length>0){anchorLink(linkTarget)}}function anchorLink(linkTarget){const target=$("#"+linkTarget);target.parents().addBack().filter(".collapse:not(.show), .tab-pane, [role='tab']").each(function(index){if($(this).hasClass("collapse")){$(this).collapse("show")}else if($(this).hasClass("tab-pane")){const tabToShow=$("a[href='#"+$(this).attr("id")+"']");if(tabToShow){tabToShow.tab("show")}}else if($(this).attr("role")==="tab"){$(this).tab("show")}});setTimeout(function(){let targetElement=document.getElementById(linkTarget);if(targetElement){targetElement.scrollIntoView({block:"center",behavior:"smooth"});setTimeout(function(){flashElement(linkTarget)},500)}},1e3)} \ No newline at end of file diff --git a/docs/config-file/templates/js/badge_type.html b/docs/config-file/templates/js/badge_type.html new file mode 100644 index 0000000000..32b3ec5b99 --- /dev/null +++ b/docs/config-file/templates/js/badge_type.html @@ -0,0 +1,14 @@ +{%- if type_name == "string" -%} + {%- if schema.kw_min_length -%} + {{ restriction("Must be at least " ~ schema.kw_min_length.literal ~ " characters long", "min-length", schema.kw_min_length.html_id) }} + {%- endif -%} + {%- if schema.kw_max_length -%} + {{ restriction("Must be at most " ~ schema.kw_max_length.literal ~ " characters long", "max-length", schema.kw_max_length.html_id) }} + {%- endif -%} +{%- endif -%} +{%- if type_name in ["integer", "number"] -%} + {%- set restriction_text = (schema | get_numeric_restrictions_text("", "")) -%} + {%- if restriction_text -%} + {{ 
restriction(schema | get_numeric_restrictions_text("", ""), "numeric", schema.html_id ~ "_number") }} + {%- endif -%} +{%- endif -%} diff --git a/docs/config-file/templates/js/base.html b/docs/config-file/templates/js/base.html new file mode 100644 index 0000000000..ec629067c5 --- /dev/null +++ b/docs/config-file/templates/js/base.html @@ -0,0 +1,42 @@ +{% from 'content.html' import content with context %} + + + + + + + + + + + + + {%- set title = schema.keywords.get("title") -%} + {% if title %} + {% set title = title.literal %} + {% endif %} + {%- if title -%}{{ title }}{%- else -%}Schema Docs{%- endif -%} + + + {%- if title -%} +

{{ title }}

+ {%- endif -%} + {%- if config.expand_buttons -%} +
+ + +
+ {%- endif -%} + + {%- set description = (schema | get_description) -%} + {%- include "section_description.html" -%} + + {{ content(schema,False, True) }} + + {% if config.with_footer -%} + + {%- endif -%} + + \ No newline at end of file diff --git a/docs/config-file/templates/js/breadcrumbs_no_object.html b/docs/config-file/templates/js/breadcrumbs_no_object.html new file mode 100644 index 0000000000..df5787f9e3 --- /dev/null +++ b/docs/config-file/templates/js/breadcrumbs_no_object.html @@ -0,0 +1,10 @@ + \ No newline at end of file diff --git a/docs/config-file/templates/js/breadcrumbs_object.html b/docs/config-file/templates/js/breadcrumbs_object.html new file mode 100644 index 0000000000..6e1562e095 --- /dev/null +++ b/docs/config-file/templates/js/breadcrumbs_object.html @@ -0,0 +1,15 @@ + \ No newline at end of file diff --git a/docs/config-file/templates/js/content.html b/docs/config-file/templates/js/content.html new file mode 100644 index 0000000000..f428997837 --- /dev/null +++ b/docs/config-file/templates/js/content.html @@ -0,0 +1,120 @@ +{% from "macro_restriction.html" import restriction with context %} + +{%- macro tabbed_section(operator, current_node) -%} + {% include "tabbed_section.html" %} +{%- endmacro -%} + +{%- macro content(schema, skip_headers=False, skip_type=False) -%} + {% set keys = schema.keywords %} + + {# Resolve type #} + {%- set type_name = schema.type_name -%} + + {% if not skip_headers and type_name != "object" %} + {%- if config.show_breadcrumbs -%} + {%- if type_name == "object" -%} + {%- include "breadcrumbs_object.html" -%} + {%- else -%} + {%- include "breadcrumbs_no_object.html" -%} + {%- endif -%} + + {%- endif -%} + + + + {# Display default #} + {%- set default_value = schema.default_value -%} + {%- if default_value -%} + {{ " " }}Default: {{ default_value }} + {%- endif -%} + + {# Display type #} + {%- if not schema is combining and not skip_type -%} + Type: {{ type_name }} + {%- endif -%} + + {%- if schema.format -%} 
+ Format: {{ schema.format }} + {%- endif -%} +
+ + {%- set description = (schema | get_description) -%} + {%- include "section_description.html" -%} + {%- endif -%} + + + {%- if schema.should_be_a_link(config) -%} + Same definition as {{ schema.links_to.link_name }} + {%- elif schema.refers_to -%} + {{ content(schema.refers_to_merged, True) }} + {%- else -%} + + + + {# Combining: allOf, anyOf, oneOf, not #} + {%- if schema.kw_all_of -%} +
{{ tabbed_section("allOf", schema.kw_all_of) }}
+ {%- endif -%} + {%- if schema.kw_any_of -%} +
{{ tabbed_section("anyOf", schema.kw_any_of) }}
+ {%- endif -%} + {%- if schema.kw_one_of -%} +
{{ tabbed_section("oneOf", schema.kw_one_of) }}
+ {%- endif -%} + {%- if schema.kw_not -%} + {% include "section_not.html" %} + {%- endif -%} + + {# Enum and const #} + {%- if schema.kw_enum -%} +
+

Must be one of:

+ +
+ {%- endif -%} + {%- if schema.kw_const -%} + Specific value: {{ schema.kw_const.raw | python_to_json }} + {%- endif -%} + + {# Pattern (Regular Expression) #} + {%- if schema.kw_pattern -%} + Must match regular expression: {{ schema.kw_pattern.literal | escape }} + {%- endif -%} + + {# Conditional subschema, or if-then-else section #} + {%- if schema.has_conditional -%} + {% include "section_conditional_subschema.html" %} + {%- endif -%} + + {# Required properties that are not defined under "properties". They will only be listed #} + {% include "section_undocumented_required_properties.html" %} + + {# Show the requested type(s) #} + {% include "badge_type.html" %} + + {# Show array restrictions #} + {%- if type_name.startswith("array") -%} + {% include "section_array.html" %} + {%- endif -%} + + {# Display examples #} + {%- set examples = schema.examples -%} + {%- if examples -%} + {% include "section_examples.html" %} + {%- endif -%} + + + {# Properties, pattern properties, additional properties #} + {%- for sub_property in schema.iterate_properties -%} + {%- if sub_property.type_name == "object" -%} + {% include "section_properties_object.html" %} + {%- else -%} + {% include "section_properties_2.html" %} + {%- endif -%} + {%- endfor -%} + {%- endif -%} +{%- endmacro -%} diff --git a/docs/config-file/templates/js/macro_restriction.html b/docs/config-file/templates/js/macro_restriction.html new file mode 100644 index 0000000000..fdec2cbbe8 --- /dev/null +++ b/docs/config-file/templates/js/macro_restriction.html @@ -0,0 +1,3 @@ +{%- macro restriction(inner_text, css_class_name, html_id) -%} +

{{ inner_text }}

+{%- endmacro -%} \ No newline at end of file diff --git a/docs/config-file/templates/js/schema_doc.css b/docs/config-file/templates/js/schema_doc.css new file mode 100644 index 0000000000..7896225bee --- /dev/null +++ b/docs/config-file/templates/js/schema_doc.css @@ -0,0 +1,192 @@ +body { + font: 16px/1.5em "Overpass", "Open Sans", Helvetica, sans-serif; + color: #333; + font-weight: 300; + padding: 40px; +} + +.btn.btn-link { + font-size: 18px; + user-select: text; +} + +.jsfh-animated-property { + animation: eclair; + animation-iteration-count: 1; + animation-fill-mode: forwards; + animation-duration: .75s; + +} + +@keyframes eclair { + 0%,100% { + transform: scale(1); + } + 50% { + transform: scale(1.03); + } +} + +.btn.btn-primary { + margin: 10px; +} + +.btn.example-show.collapsed:before { + content: "show" +} + +.btn.example-show:before { + content: "hide" +} + +.description.collapse:not(.show) { + max-height: 100px !important; + overflow: hidden; + + display: -webkit-box; + -webkit-line-clamp: 2; + -webkit-box-orient: vertical; +} + +.description.collapsing { + min-height: 100px !important; +} + +.collapse-description-link.collapsed:after { + content: '+ Read More'; +} + +.collapse-description-link:not(.collapsed):after { + content: '- Read Less'; +} + +.badge { + font-size: 100%; + margin-bottom: 0.5rem; + margin-top: 0.5rem; +} + +.badge.value-type { + font-size: 120%; + margin-left: 5px; + margin-right: 5px; + margin-bottom: 10px; +} + + +.badge.default-value { + font-size: 120%; + margin-left: 5px; + margin-bottom: 10px; +} + +.badge.restriction { + display: inline-block; +} + +.badge.required-property,.badge.deprecated-property,.badge.pattern-property,.badge.no-additional { + font-size: 100%; + margin-left: 10px; +} + +.accordion div.card:only-child { + border-bottom: 1px solid rgba(0, 0, 0, 0.125); +} + +.examples { + padding: 1rem !important; +} + +.examples pre { + margin-bottom: 0; +} + +.highlight.jumbotron { + padding: 1rem !important; +} + 
+.generated-by-footer { + margin-top: 1em; + text-align: right; +} + +.breadcrumbs { + font-family: monospace; + background: #e9ecef; + font-size: 100%; + margin-left: 10px; + font-weight: 500; + display: inline-block; + margin: 20px +} + +/* From https://github.com/richleland/pygments-css/blob/master/friendly.css, see https://github.com/trentm/python-markdown2/wiki/fenced-code-blocks */ +.highlight { background: #e9ecef; } /* Changed from #f0f0f0 in the original style to be the same as bootstrap's jumbotron */ +.highlight .hll { background-color: #ffffcc } +.highlight .c { color: #60a0b0; font-style: italic } /* Comment */ +.highlight .err { border: 1px solid #FF0000 } /* Error */ +.highlight .k { color: #007020; font-weight: bold } /* Keyword */ +.highlight .o { color: #666666 } /* Operator */ +.highlight .ch { color: #60a0b0; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #60a0b0; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #007020 } /* Comment.Preproc */ +.highlight .cpf { color: #60a0b0; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #60a0b0; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #60a0b0; background-color: #fff0f0 } /* Comment.Special */ +.highlight .gd { color: #A00000 } /* Generic.Deleted */ +.highlight .ge { font-style: italic } /* Generic.Emph */ +.highlight .gr { color: #FF0000 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #00A000 } /* Generic.Inserted */ +.highlight .go { color: #888888 } /* Generic.Output */ +.highlight .gp { color: #c65d09; font-weight: bold } /* Generic.Prompt */ +.highlight .gs { font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #0044DD } /* Generic.Traceback */ +.highlight .kc { color: #007020; font-weight: bold } /* Keyword.Constant */ +.highlight .kd { color: 
#007020; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #007020; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #007020 } /* Keyword.Pseudo */ +.highlight .kr { color: #007020; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #902000 } /* Keyword.Type */ +.highlight .m { color: #40a070 } /* Literal.Number */ +.highlight .s { color: #4070a0 } /* Literal.String */ +.highlight .na { color: #4070a0 } /* Name.Attribute */ +.highlight .nb { color: #007020 } /* Name.Builtin */ +.highlight .nc { color: #0e84b5; font-weight: bold } /* Name.Class */ +.highlight .no { color: #60add5 } /* Name.Constant */ +.highlight .nd { color: #555555; font-weight: bold } /* Name.Decorator */ +.highlight .ni { color: #d55537; font-weight: bold } /* Name.Entity */ +.highlight .ne { color: #007020 } /* Name.Exception */ +.highlight .nf { color: #06287e } /* Name.Function */ +.highlight .nl { color: #002070; font-weight: bold } /* Name.Label */ +.highlight .nn { color: #0e84b5; font-weight: bold } /* Name.Namespace */ +.highlight .nt { color: #062873; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #bb60d5 } /* Name.Variable */ +.highlight .ow { color: #007020; font-weight: bold } /* Operator.Word */ +.highlight .w { color: #bbbbbb } /* Text.Whitespace */ +.highlight .mb { color: #40a070 } /* Literal.Number.Bin */ +.highlight .mf { color: #40a070 } /* Literal.Number.Float */ +.highlight .mh { color: #40a070 } /* Literal.Number.Hex */ +.highlight .mi { color: #40a070 } /* Literal.Number.Integer */ +.highlight .mo { color: #40a070 } /* Literal.Number.Oct */ +.highlight .sa { color: #4070a0 } /* Literal.String.Affix */ +.highlight .sb { color: #4070a0 } /* Literal.String.Backtick */ +.highlight .sc { color: #4070a0 } /* Literal.String.Char */ +.highlight .dl { color: #4070a0 } /* Literal.String.Delimiter */ +.highlight .sd { color: #4070a0; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #4070a0 } 
/* Literal.String.Double */ +.highlight .se { color: #4070a0; font-weight: bold } /* Literal.String.Escape */ +.highlight .sh { color: #4070a0 } /* Literal.String.Heredoc */ +.highlight .si { color: #70a0d0; font-style: italic } /* Literal.String.Interpol */ +.highlight .sx { color: #c65d09 } /* Literal.String.Other */ +.highlight .sr { color: #235388 } /* Literal.String.Regex */ +.highlight .s1 { color: #4070a0 } /* Literal.String.Single */ +.highlight .ss { color: #517918 } /* Literal.String.Symbol */ +.highlight .bp { color: #007020 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #06287e } /* Name.Function.Magic */ +.highlight .vc { color: #bb60d5 } /* Name.Variable.Class */ +.highlight .vg { color: #bb60d5 } /* Name.Variable.Global */ +.highlight .vi { color: #bb60d5 } /* Name.Variable.Instance */ +.highlight .vm { color: #bb60d5 } /* Name.Variable.Magic */ +.highlight .il { color: #40a070 } /* Literal.Number.Integer.Long */ diff --git a/docs/config-file/templates/js/schema_doc.js b/docs/config-file/templates/js/schema_doc.js new file mode 100644 index 0000000000..e03ce9464b --- /dev/null +++ b/docs/config-file/templates/js/schema_doc.js @@ -0,0 +1,66 @@ +$(document).on('click', 'a[href^="#"]', function(event) { + event.preventDefault(); + history.pushState({}, '', this.href); +}); + +function flashElement(elementId) { + // $( "#" + elementId ).fadeOut(100).fadeIn(200).fadeOut(100).fadeIn(500); + myElement = document.getElementById(elementId); + myElement.classList.add("jsfh-animated-property"); + setTimeout(function() { + myElement.classList.remove("jsfh-animated-property"); + }, 1000); +} + +function setAnchor(anchorLinkDestination) { + // Set anchor link without reloading + history.pushState({}, '', anchorLinkDestination); +} + +function anchorOnLoad() { + // Added to onload on body, checks if there is an anchor link and if so, expand + let linkTarget = decodeURIComponent(window.location.hash.split("?")[0].split("&")[0]); + if (linkTarget[0] === "#") { 
+ linkTarget = linkTarget.substr(1); + } + + if (linkTarget.length > 0) { + anchorLink(linkTarget); + } +} + +function anchorLink(linkTarget) { + const target = $( "#" + linkTarget ); + // Find the targeted element to expand and all its parents that can be expanded + target.parents().addBack().filter(".collapse:not(.show), .tab-pane, [role='tab']").each( + function(index) { + if($( this ).hasClass("collapse")) { + $( this ).collapse("show"); + } else if ($( this ).hasClass("tab-pane")) { + // We have the pane and not the tab itself, find the tab + const tabToShow = $( "a[href='#" + $( this ).attr("id") + "']" ); + if (tabToShow) { + tabToShow.tab("show"); + } + } else if ($( this ).attr("role") === "tab") { + // The tab is not a parent of underlying elements, the tab pane is + // However, it can still be linked directly + $( this ).tab("show"); + } + } + ); + + // Wait a little so the user has time to see the page scroll + // Or maybe it is to be sure everything is expanded before scrolling and I was not able to bind to the bootstrap + // events in a way that works all the time, we may never know + setTimeout(function() { + let targetElement = document.getElementById(linkTarget); + if (targetElement) { + targetElement.scrollIntoView({ block: "center", behavior:"smooth" }); + // Flash the element so that the user notices where the link points to + setTimeout(function() { + flashElement(linkTarget); + }, 500); + } + }, 1000); +} \ No newline at end of file diff --git a/docs/config-file/templates/js/schema_doc.min.js b/docs/config-file/templates/js/schema_doc.min.js new file mode 100644 index 0000000000..17eceaf59a --- /dev/null +++ b/docs/config-file/templates/js/schema_doc.min.js @@ -0,0 +1 @@ +$(document).on("click",'a[href^="#"]',function(event){event.preventDefault();history.pushState({},"",this.href)});function 
flashElement(elementId){myElement=document.getElementById(elementId);myElement.classList.add("jsfh-animated-property");setTimeout(function(){myElement.classList.remove("jsfh-animated-property")},1e3)}function setAnchor(anchorLinkDestination){history.pushState({},"",anchorLinkDestination)}function anchorOnLoad(){let linkTarget=decodeURIComponent(window.location.hash.split("?")[0].split("&")[0]);if(linkTarget[0]==="#"){linkTarget=linkTarget.substr(1)}if(linkTarget.length>0){anchorLink(linkTarget)}}function anchorLink(linkTarget){const target=$("#"+linkTarget);target.parents().addBack().filter(".collapse:not(.show), .tab-pane, [role='tab']").each(function(index){if($(this).hasClass("collapse")){$(this).collapse("show")}else if($(this).hasClass("tab-pane")){const tabToShow=$("a[href='#"+$(this).attr("id")+"']");if(tabToShow){tabToShow.tab("show")}}else if($(this).attr("role")==="tab"){$(this).tab("show")}});setTimeout(function(){let targetElement=document.getElementById(linkTarget);if(targetElement){targetElement.scrollIntoView({block:"center",behavior:"smooth"});setTimeout(function(){flashElement(linkTarget)},500)}},1e3)} \ No newline at end of file diff --git a/docs/config-file/templates/js/section_array.html b/docs/config-file/templates/js/section_array.html new file mode 100644 index 0000000000..a676f0d919 --- /dev/null +++ b/docs/config-file/templates/js/section_array.html @@ -0,0 +1,36 @@ +{%- if schema.kw_min_items -%} + {{ restriction("Must contain a minimum of " ~ schema.kw_min_items.literal ~ " items", "min-items", schema.kw_min_items.html_id) }} +{%- endif -%} +{%- if schema.kw_max_items -%} + {{ restriction("Must contain a maximum of " ~ schema.kw_max_items.literal ~ " items", "max-items", schema.kw_max_items.html_id) }} +{%- endif -%} +{%- if schema.kw_unique_items and schema.kw_unique_items.literal == True -%} + {{ restriction("All items must be unique", "unique-items", schema.kw_unique_items.html_id) }} +{%- endif -%} +{%- if schema.array_items_def -%} +

Each item of this array must be:

+
+
+ {{ content(schema.array_items_def) }} +
+
+{%- endif -%} +{%- if schema.tuple_validation_items -%} +

Tuple Validation

+ {% for item in schema.tuple_validation_items %} +
Item at {{ loop.index }} must be:
+
+
+ {{ content(item) }} +
+
+ {% endfor %} +{%- endif -%} +{%- if schema.kw_contains and schema.kw_contains.literal != {} -%} +

At least one of the items must be:

+
+
+ {{ content(schema.kw_contains) }} +
+
+{%- endif -%} \ No newline at end of file diff --git a/docs/config-file/templates/js/section_conditional_subschema.html b/docs/config-file/templates/js/section_conditional_subschema.html new file mode 100644 index 0000000000..e543669d96 --- /dev/null +++ b/docs/config-file/templates/js/section_conditional_subschema.html @@ -0,0 +1,58 @@ +

+ +

+

If the conditions in the "If" tab are respected, then the conditions in the "Then" tab should be respected. + Otherwise, the conditions in the "Else" tab should be respected.

+ + +
+ {% set tab_id = schema.kw_if.html_id %} +
+ {{ content(schema.kw_if) }} +
+ + {% if schema.kw_then %} + {% set tab_id = schema.kw_then.html_id %} +
+ {{ content(schema.kw_then) }} +
+ {%- endif -%} + + {%- if schema.kw_else -%} + {% set tab_id = schema.kw_else.html_id %} +
+ {{ content(schema.kw_else) }} +
+ {%- endif -%} +
\ No newline at end of file diff --git a/docs/config-file/templates/js/section_description.html b/docs/config-file/templates/js/section_description.html new file mode 100644 index 0000000000..e0cc3fab5f --- /dev/null +++ b/docs/config-file/templates/js/section_description.html @@ -0,0 +1,16 @@ + +{# Display description #} +{%- if description -%} + {%- if not config.collapse_long_descriptions or description is description_short -%} + {{ description }} + {%- else -%} +
+ {{ description }} +
+
+ +
+ {%- endif -%} +{%- endif -%} diff --git a/docs/config-file/templates/js/section_examples.html b/docs/config-file/templates/js/section_examples.html new file mode 100644 index 0000000000..7c8fa1e540 --- /dev/null +++ b/docs/config-file/templates/js/section_examples.html @@ -0,0 +1,18 @@ +
+
Example{% if examples|length > 1 %}s{% endif %}:
+
+ +{%- for example in examples -%} + {%- set example_id = schema.html_id ~ "_ex" ~ loop.index -%} + {%- set example_is_long = config.collapse_long_examples and example is not description_short -%} + {%- if example_is_long -%} + + {%- endif -%} +
+ {%- if not examples_as_yaml -%} + {{ example | highlight_json_example }} + {%- else -%} + {{ example | highlight_yaml_example }} + {%- endif -%} +
+{%- endfor -%} diff --git a/docs/config-file/templates/js/section_not.html b/docs/config-file/templates/js/section_not.html new file mode 100644 index 0000000000..ed0f52224e --- /dev/null +++ b/docs/config-file/templates/js/section_not.html @@ -0,0 +1,8 @@ +
+

Must not be:

+
+
+ {{ content(schema.kw_not) }} +
+
+
\ No newline at end of file diff --git a/docs/config-file/templates/js/section_properties_2.html b/docs/config-file/templates/js/section_properties_2.html new file mode 100644 index 0000000000..8f3ecf6b58 --- /dev/null +++ b/docs/config-file/templates/js/section_properties_2.html @@ -0,0 +1,4 @@ +{% set html_id = sub_property.html_id %} + +{{ content(sub_property) }} +
diff --git a/docs/config-file/templates/js/section_properties_object.html b/docs/config-file/templates/js/section_properties_object.html new file mode 100644 index 0000000000..e59bc5a2d1 --- /dev/null +++ b/docs/config-file/templates/js/section_properties_object.html @@ -0,0 +1,60 @@ +{% set html_id = sub_property.html_id %} +
+
+
+

+ + +

+ {{ sub_property.description | escape }} +
+ +
+
+ {%- if sub_property.is_pattern_property -%} +

+ +

+

All properties whose name matches the following regular expression must respect the following conditions

+ Property name regular expression: {{ sub_property.property_name | escape }} +
+ {%- endif -%} + + {%- if sub_property.is_additional_properties -%} + {%- if sub_property.is_additional_properties_schema -%} +

Each additional property must conform to the following schema

+ {%- else -%} +

Additional Properties of any type are allowed.

+ {%- endif -%} + {%- endif -%} + + {{ content(sub_property) }} +
+
+
+
diff --git a/docs/config-file/templates/js/section_undocumented_required_properties.html b/docs/config-file/templates/js/section_undocumented_required_properties.html new file mode 100644 index 0000000000..7e7ceaebee --- /dev/null +++ b/docs/config-file/templates/js/section_undocumented_required_properties.html @@ -0,0 +1,11 @@ +{%- set undocumented_required_properties = schema | get_undocumented_required_properties -%} +{%- if undocumented_required_properties-%} +
+

The following properties are required:

+ +
+{%- endif -%} \ No newline at end of file diff --git a/docs/config-file/templates/js/tabbed_section.html b/docs/config-file/templates/js/tabbed_section.html new file mode 100644 index 0000000000..9cecf4c142 --- /dev/null +++ b/docs/config-file/templates/js/tabbed_section.html @@ -0,0 +1,25 @@ +

+ +

+{%- set tab_label = "Option" -%} +{%- if operator == "allOf" -%} + {%- set tab_label = "Requirement" -%} +{%- endif -%} + +
+ {%- for node in current_node.array_items -%} +
+ {{ content(node) }} +
+ {%- endfor -%} +
\ No newline at end of file diff --git a/docs/config-file/templates/md/base.md b/docs/config-file/templates/md/base.md new file mode 100644 index 0000000000..8ca9761f96 --- /dev/null +++ b/docs/config-file/templates/md/base.md @@ -0,0 +1,17 @@ +{% set depth = 0 %} +{{ schema.keywords.get("title").literal | default("Schema Docs") | md_heading(depth) }} +{% set contentBase %} +{% with schema=schema, skip_headers=False, depth=depth %} + {% include "content.md" %} +{% endwith %} +{% endset %} + + + +{{ contentBase }} + +---------------------------------------------------------------------------------------------------------------------------- +{% if config.with_footer -%} +Generated using [json-schema-for-humans](https://github.com/coveooss/json-schema-for-humans){% if config.footer_show_time %} on {{ get_local_time() }}{% endif %} + +{% endif -%} diff --git a/docs/config-file/templates/md/breadcrumbs.md b/docs/config-file/templates/md/breadcrumbs.md new file mode 100644 index 0000000000..6ab031d6c4 --- /dev/null +++ b/docs/config-file/templates/md/breadcrumbs.md @@ -0,0 +1,24 @@ +{% set my_string = [] %} +{%- for node in schema.nodes_from_root -%} + {%- if not loop.first -%} + {%- set _ = my_string.append(node.name_for_breadcrumbs) -%} + {%- endif -%} +{%- endfor -%} + + +{%- filter md_escape_for_table -%} +{# + {%- if config.show_breadcrumbs -%} + {%- for node in schema.nodes_from_root -%} + {{ node.name_for_breadcrumbs }}{%- if not loop.last %} > {% endif -%} + {%- endfor -%} + {%- else -%} + {{ schema.name_for_breadcrumbs }} + {%- endif -%} +#} +{%- if schema.type_name == "object" -%} +[{{ my_string|join('.') }}] +{%- else -%} +{{ my_string|join('.') }} +{%- endif -%} +{%- endfilter -%} diff --git a/docs/config-file/templates/md/content.md b/docs/config-file/templates/md/content.md new file mode 100644 index 0000000000..7b645745b3 --- /dev/null +++ b/docs/config-file/templates/md/content.md @@ -0,0 +1,111 @@ +{# + content is a template and not a macro in md + 
because macro parameters are not through context + when rendering a template from the macro and it caused + serious problems when using recursive calls + mandatory context parameters: + schema +#} +{# context parameters default values #} +{% set skip_headers = skip_headers or False %} +{% set depth = depth or 0 %} +{# end context parameters #} + +{% set keys = schema.keywords %} +{%- if not skip_headers %} + +{% if schema.title and schema.title | length > 0 %} +**Title:** {{ schema.title }} +{% endif %} + +**Type:** : `{{ schema.type_name }}` +{% if schema.type_name != "object" and schema.default_value %} + {# input schema #} + +**Default:** `{{schema.default_value}}` + + +{% endif %} +{# +{{ schema | md_type_info_table | md_generate_table }} +#} +{% set description = (schema | get_description) %} +{% include "section_description.md" %} +{% endif %} + + + +{# Display examples #} +{% set examples = schema.examples %} +{% if examples %} + {% include "section_examples.md" %} +{% endif %} + +{% if schema.type_name != "object" and schema.default_value %} + {% include "generate_toml_example.md" %} +{% endif %} + +{% if schema.should_be_a_link(config) %} +{% elif schema.refers_to -%} + {%- with schema=schema.refers_to_merged, skip_headers=True, depth=depth -%} + {% include "content.md" %} + {% endwith %} +{% else %} + {# Properties, pattern properties, additional properties #} + {% if schema.type_name == "object" %} + + {{- schema | md_properties_table | md_generate_table -}} + + {% endif %} + + {# Combining: allOf, anyOf, oneOf, not #} + {% if schema.kw_all_of %} + {% with operator="allOf", title="All of(Requirement)", current_node=schema.kw_all_of, skip_required=True %} + {% include "tabbed_section.md" %} + {% endwith %} + {% endif %} + {% if schema.kw_any_of %} + {% with operator="anyOf", title="Any of(Option)", current_node=schema.kw_any_of, skip_required=True %} + {% include "tabbed_section.md" %} + {% endwith %} + {% endif %} + {% if schema.kw_one_of %} + {% with 
operator="oneOf", title="One of(Option)",current_node=schema.kw_one_of, skip_required=True %} + {% include "tabbed_section.md" %} + {% endwith %} + {% endif %} + {% if schema.kw_not %} + {% include "section_not.md" %} + {% endif %} + + {# Enum and const #} + {% if schema.kw_enum -%} + {% include "section_one_of.md" %} + {%- endif %} + {%- if schema.kw_const -%} + Specific value: `{{ schema.kw_const.raw | python_to_json }}` + {%- endif -%} + + {# Conditional subschema, or if-then-else section #} + {% if schema.has_conditional %} + {% with skip_headers=False, depth=depth+1 %} + {% include "section_conditional_subschema.md" %} + {% endwith %} + {% endif %} + + {# Required properties that are not defined under "properties". They will only be listed #} + {% include "section_undocumented_required_properties.md" %} + + {# Show the requested type(s) #} + {{- schema | md_restrictions_table | md_generate_table -}} + + {# Show array restrictions #} + {% if schema.type_name.startswith("array") %} + {% include "section_array.md" %} + {% endif %} + + {# details of Properties, pattern properties, additional properties #} + {% if schema.type_name == "object" %} + {% include "section_properties_details.md" %} + {% endif %} +{% endif %} diff --git a/docs/config-file/templates/md/generate_toml_example.md b/docs/config-file/templates/md/generate_toml_example.md new file mode 100644 index 0000000000..7659afdcb9 --- /dev/null +++ b/docs/config-file/templates/md/generate_toml_example.md @@ -0,0 +1,22 @@ +{% set breadcumbs = [] %} +{% set breadcumbs_section = [] %} +{%- for node in schema.nodes_from_root -%} + {%- if not loop.first -%} + {%- set _ = breadcumbs.append(node.name_for_breadcrumbs) -%} + {%- if node.type_name == "object" -%} + {%- set _ = breadcumbs_section.append(node.name_for_breadcrumbs) -%} + {%- endif -%} + {%- endif -%} +{%- endfor -%} + +{% set section_name = breadcumbs_section|join('.') %} +{% set variable_name = breadcumbs|last() %} + +**Example setting the default 
value** ({{schema.default_value}}): +``` +{% if section_name != "" %} +[{{section_name}}] +{% endif %} +{{variable_name}}={{schema.default_value}} +``` + diff --git a/docs/config-file/templates/md/section_array.md b/docs/config-file/templates/md/section_array.md new file mode 100644 index 0000000000..693ba53636 --- /dev/null +++ b/docs/config-file/templates/md/section_array.md @@ -0,0 +1,38 @@ +{% if schema.type_name == "array of object" %} +{{ schema | md_array_restrictions | md_generate_table }} + +{% if schema.array_items_def or schema.tuple_validation_items %} +{{ schema | md_array_items_restrictions | md_generate_table }} +{% endif %} + + +{% if schema.array_items_def %} +{% filter md_heading(depth+1) %} +{% with schema=schema.array_items_def %}{%- include "breadcrumbs.md" %}{% endwith %} +{% endfilter %} +{% with schema=schema.array_items_def, skip_headers=False, depth=depth+1, skip_required=True %} + {% include "content.md" %} +{% endwith %} +{% endif %} + +{# +{% if schema.tuple_validation_items %} +{% for item in schema.tuple_validation_items %} + {% filter md_heading(depth+1) %} + {% with schema=item %}{%- include "breadcrumbs.md" %}{% endwith %} + {% endfilter %} + {% with schema=item, skip_headers=False, depth=depth+1, skip_required=True %} + {% include "content.md" %} + {% endwith %} +{% endfor %} +{% endif %} + + +{% if schema.kw_contains and schema.kw_contains.literal != {} %} +{{ "At least one of the items must be" | md_heading(depth+1) }} +{% with schema=schema.kw_contains, skip_headers=False, depth=depth+1, skip_required=True %} + {% include "content.md" %} +{% endwith %} +{% endif %} +#} +{% endif %} \ No newline at end of file diff --git a/docs/config-file/templates/md/section_conditional_subschema.md b/docs/config-file/templates/md/section_conditional_subschema.md new file mode 100644 index 0000000000..7abbbd50f6 --- /dev/null +++ b/docs/config-file/templates/md/section_conditional_subschema.md @@ -0,0 +1,24 @@ +{% if schema.kw_if %} + {% set 
first_property = schema.kw_if | get_first_property %} + + {% if schema.kw_then %} + {%- filter md_heading(depth) -%}If ( + {{- first_property.property_name | md_escape_for_table -}} + {{- " = " -}} + {{- first_property.kw_const.literal | python_to_json -}} + ){%- endfilter -%} + {% with schema=schema.kw_then, skip_headers=False, depth=depth %} + {% include "content.md" %} + {% endwith %} + {% endif %} + {% if schema.kw_else %} + {%- filter md_heading(depth) -%}Else (i.e. {{ " " }} + {{- first_property.property_name | md_escape_for_table -}} + {{- " != " -}} + {{- first_property.kw_const.literal | python_to_json -}} + ){%- endfilter -%} + {% with schema=schema.kw_else, skip_headers=False, depth=depth %} + {% include "content.md" %} + {% endwith %} + {% endif %} +{% endif %} \ No newline at end of file diff --git a/docs/config-file/templates/md/section_description.md b/docs/config-file/templates/md/section_description.md new file mode 100644 index 0000000000..2051470663 --- /dev/null +++ b/docs/config-file/templates/md/section_description.md @@ -0,0 +1,4 @@ +{# Display description #} +{% if description %} +**Description:**{{ " " }}{{ description }} +{% endif %} \ No newline at end of file diff --git a/docs/config-file/templates/md/section_examples.md b/docs/config-file/templates/md/section_examples.md new file mode 100644 index 0000000000..d52018d127 --- /dev/null +++ b/docs/config-file/templates/md/section_examples.md @@ -0,0 +1,16 @@ +**Example{% if examples|length > 1 %}s{% endif %}:**{{ " " }} + +{% for example in examples %} + {%- if loop.first %}{{ "\n" }}{% endif -%} + {% set example_id = schema.html_id ~ "_ex" ~ loop.index %} + {%- if not examples_as_yaml -%} + {{- "" }}```json + {{- "\n" }}{{ example }} + {{- "\n" }}``` + {%- else -%} + {{- "" }}```yaml + {{- "\n" }}{{ example | yaml_example }} + {{- "\n" }}``` + {%- endif -%} + {{ "\n" }} +{% endfor %} diff --git a/docs/config-file/templates/md/section_not.md b/docs/config-file/templates/md/section_not.md 
new file mode 100644 index 0000000000..45a4c3c912 --- /dev/null +++ b/docs/config-file/templates/md/section_not.md @@ -0,0 +1,4 @@ +{{ "Must **not** be" | md_heading(depth+1) }} +{% with schema=schema.kw_not, skip_headers=False, depth=depth+1, skip_required=True %} + {% include "content.md" %} +{% endwith %} \ No newline at end of file diff --git a/docs/config-file/templates/md/section_one_of.md b/docs/config-file/templates/md/section_one_of.md new file mode 100644 index 0000000000..a7c1ad27f3 --- /dev/null +++ b/docs/config-file/templates/md/section_one_of.md @@ -0,0 +1,4 @@ +Must be one of: +{% for enum_choice in schema.kw_enum.array_items %} +* {{ enum_choice.literal | python_to_json }} +{% endfor %} \ No newline at end of file diff --git a/docs/config-file/templates/md/section_properties_details.md b/docs/config-file/templates/md/section_properties_details.md new file mode 100644 index 0000000000..2d8a17b9d3 --- /dev/null +++ b/docs/config-file/templates/md/section_properties_details.md @@ -0,0 +1,32 @@ +{% for sub_property in schema.iterate_properties %} + {%- if sub_property.is_additional_properties and not sub_property.is_additional_properties_schema -%} + {% continue %} + {% endif %} + + {% set html_id = sub_property.html_id %} + + {% set description = sub_property | get_description %} + + {% filter md_heading(depth + 1, html_id) -%} + {%- filter replace('\n', '') -%} + {%- if not skip_required and sub_property.property_name -%} + {{ md_badge("Required", "blue", fallback=False) if sub_property.is_required_property else md_badge("Optional", "yellow", fallback=False) -}} + {%- endif -%} + {%- if sub_property is deprecated -%}~~{%- endif -%} + {%- if sub_property.is_pattern_property %} Pattern{% endif %} `{% with schema=sub_property %}{%- include "breadcrumbs.md" %}{% endwith %}` + {%- if sub_property is deprecated -%}~~{%- endif -%} + {%- endfilter %} + {%- endfilter %} + + {% if sub_property.is_pattern_property %} +> All properties whose name matches the 
regular expression +```{{ sub_property.property_name }}``` ([Test](https://regex101.com/?regex={{ sub_property.property_name | urlencode }})) +must respect the following conditions + {% endif %} + + + {% with schema=sub_property, skip_headers=False, depth=depth+1 %} + {% include "content.md" %} + {% endwith %} + +{% endfor %} diff --git a/docs/config-file/templates/md/section_undocumented_required_properties.md b/docs/config-file/templates/md/section_undocumented_required_properties.md new file mode 100644 index 0000000000..1756ef9329 --- /dev/null +++ b/docs/config-file/templates/md/section_undocumented_required_properties.md @@ -0,0 +1,7 @@ +{% set undocumented_required_properties = schema | get_undocumented_required_properties %} +{% if undocumented_required_properties%} +{{ "The following properties are required" | md_heading(depth+1) }} +{% for required_property in undocumented_required_properties %} +* {{ required_property }} +{% endfor %} +{% endif %} \ No newline at end of file diff --git a/docs/config-file/templates/md/tabbed_section.md b/docs/config-file/templates/md/tabbed_section.md new file mode 100644 index 0000000000..3f43eb4eff --- /dev/null +++ b/docs/config-file/templates/md/tabbed_section.md @@ -0,0 +1,11 @@ + +{{ current_node | md_array_items(title) | md_generate_table }} + +{% for node in current_node.array_items %} + {% filter md_heading(depth+1, node.html_id) -%} + {% if node.is_pattern_property %}Pattern{% endif %} Property `{% with schema=node %}{%- include "breadcrumbs.md" %}{% endwith %}` + {%- endfilter %} + {% with schema=node, skip_headers=False, depth=depth+1 %} + {% include "content.md" %} + {% endwith %} +{% endfor %} \ No newline at end of file diff --git a/docs/configuration.md b/docs/configuration.md new file mode 100644 index 0000000000..f38df7dfb1 --- /dev/null +++ b/docs/configuration.md @@ -0,0 +1,42 @@ +## Configuration + +To configure a node you need 3 files: +- Node configuration +- Genesis configuration +- Prover 
configuration + +### Node Config +This file is a [TOML](https://en.wikipedia.org/wiki/TOML#) formatted file. +You could find some examples here: + - `config/environments/local/local.node.config.toml`: running a permissionless node + - `config/environments/mainnet/public.node.config.toml` + - `config/environments/public/public.node.config.toml` + - `test/config/test.node.config.toml`: configuration for a trusted node used in CI + + For details about the contents you can read specifications [here](config-file/node-config-doc.md) + +This file is used for trusted and for permissionless nodes. In the case of a permissionless node you only need to set up the following sections: + + You could **override values with environment variables**. The variables need to have the following format: +`ZKEVM_NODE_`[
`_`]*` + +For example: +`ZKEVM_NODE_STATEDB_HOST="localhost"` overrides the value of section `[StateDB]` key `Host` + +### Network Genesis Config +This file is a [JSON](https://en.wikipedia.org/wiki/JSON) formatted file. +This contains all the information about the relation between the L1 and L2 networks (e.g. contracts, etc.), also known as the genesis file + +You could find an example here: +- `config/environments/local/local.genesis.config.json`: + +For details about the contents you can read specifications [here](config-file/custom_network-config-doc.md) + + +### Prover Config + +Please check [prover repository](https://github.com/0xPolygonHermez/zkevm-prover) for further information + +Examples: + - `config/environments/mainnet/public.prover.config.json` + - `config/environments/testnet/testnet.prover.config.json` diff --git a/docs/snap_restore.md b/docs/snap_restore.md new file mode 100644 index 0000000000..7d9de4c8b7 --- /dev/null +++ b/docs/snap_restore.md @@ -0,0 +1,85 @@ +# How to use snapshot/restore feature + +This feature is for fast replication of nodes. It creates a backup of the database and allows restoration in another database to save synchronization time. +- It uses the tools `pg_dump` and `pg_restore` and requires them to match the same version as the server. + +## Snapshot + +This feature creates a dump of the entire database + +### Usage + +``` +NAME: + zkevm-node snapshot - Snapshot the state db + +USAGE: + zkevm-node snapshot [command options] [arguments...] 
+ +OPTIONS: + --cfg FILE, -c FILE Configuration FILE + --help, -h show help +``` + +**Make sure that the config file contains the data required to connect to `HashDB` database**, for example: +``` +[HashDB] +User = "prover_user" +Password = "prover_pass" +Name = "prover_db" +Host = "zkevm-state-db" +Port = "5432" +EnableLog = false +MaxConns = 200 +``` + +This generates two files in the current working path: +* For stateDB: `_`\`_`\`_`\`.sql.tar.gz` +* For hashDB: `_`\`_`\`_`\`.sql.tar.gz` +#### Example of invocation: +``` +# cd /tmp/ && /app/zkevm-node snap -c /app/config.toml +(...) +# ls -1 +prover_db_1689925019_v0.2.0-RC9-15-gd39e7f1e_d39e7f1e.sql.tar.gz +state_db_1689925019_v0.2.0-RC9-15-gd39e7f1e_d39e7f1e.sql.tar.gz +``` + + +## Restore +It populates the state and hash databases with the previous backup + +**Be sure that no node service is running!** + +### Usage + +``` +NAME: + zkevm-node restore - Restore snapshot of the state db + +USAGE: + zkevm-node restore [command options] [arguments...] 
+ +OPTIONS: + --inputfilestate value, --is value Input file stateDB + --inputfileHash value, --ih value Input file hashDB + --cfg FILE, -c FILE Configuration FILE + --help, -h show help +``` + +#### Example of invocation: +``` +/app/zkevm-node restore -c /app/config.toml --is /tmp/state_db_1689925019_v0.2.0-RC9-15-gd39e7f1e_d39e7f1e.sql.tar.gz --ih /tmp/prover_db_1689925019_v0.2.0-RC9-15-gd39e7f1e_d39e7f1e.sql.tar +.gz +``` + +# How to test +You could use `test/docker-compose.yml` to interact with `zkevm-node`: +* Run the containers: `make run` +* Launch an interactive container: +``` +docker-compose up -d zkevm-sh +docker-compose exec zkevm-sh /bin/sh +``` +* Inside this shell you can execute the examples of invocation diff --git a/etherman/config.go b/etherman/config.go index b91e4948f7..44b7b60bb6 100644 --- a/etherman/config.go +++ b/etherman/config.go @@ -1,16 +1,17 @@ package etherman -import ( - "github.com/0xPolygonHermez/zkevm-node/etherman/etherscan" -) +import "github.com/0xPolygonHermez/zkevm-node/etherman/etherscan" // Config represents the configuration of the etherman type Config struct { + // URL is the URL of the Ethereum node for L1 URL string `mapstructure:"URL"` - PrivateKeyPath string `mapstructure:"PrivateKeyPath"` - PrivateKeyPassword string `mapstructure:"PrivateKeyPassword"` + //PrivateKeyPath string `mapstructure:"PrivateKeyPath"` + //PrivateKeyPassword string `mapstructure:"PrivateKeyPassword"` + // MultiGasProvider allows the L1 gas price calculation to use multiple sources MultiGasProvider bool `mapstructure:"MultiGasProvider"` + // Etherscan is the configuration to use Etherscan as a gas provider; basically it needs the API key - Etherscan etherscan.Config + Etherscan etherscan.Config } diff --git a/etherman/etherman.go b/etherman/etherman.go index 868121bf6a..fd159cbe89 100644 --- a/etherman/etherman.go +++ b/etherman/etherman.go @@ -115,9 +115,13 @@ type ethereumClient interface { // L1Config represents the configuration of the network used in L1 type L1Config 
struct { - L1ChainID uint64 `json:"chainId"` - ZkEVMAddr common.Address `json:"polygonZkEVMAddress"` - MaticAddr common.Address `json:"maticTokenAddress"` + // Chain ID of the L1 network + L1ChainID uint64 `json:"chainId"` + // Address of the L1 contract + ZkEVMAddr common.Address `json:"polygonZkEVMAddress"` + // Address of the L1 Matic token Contract + MaticAddr common.Address `json:"maticTokenAddress"` + // Address of the L1 GlobalExitRootManager contract GlobalExitRootManagerAddr common.Address `json:"polygonZkEVMGlobalExitRootAddress"` } diff --git a/etherman/etherscan/etherscan.go b/etherman/etherscan/etherscan.go index b83f75f0fc..6f721cbbe2 100644 --- a/etherman/etherscan/etherscan.go +++ b/etherman/etherscan/etherscan.go @@ -27,8 +27,10 @@ type gasPriceEtherscan struct { // Config structure type Config struct { + // Need API key to use etherscan, if it's empty etherscan is not used ApiKey string `mapstructure:"ApiKey"` - Url string + // URL of the etherscan API. Overwritten with a hardcoded URL: "https://api.etherscan.io/api?module=gastracker&action=gasoracle&apikey=" + Url string } // Client for etherscan diff --git a/event/event.go b/event/event.go index 234812e7aa..5da961eb14 100644 --- a/event/event.go +++ b/event/event.go @@ -36,7 +36,8 @@ const ( EventID_FinalizerRestart EventID = "FINALIZER RESTART" // EventID_FinalizerBreakEvenGasPriceBigDifference is triggered when the finalizer recalculates the break even gas price and detects a big difference EventID_FinalizerBreakEvenGasPriceBigDifference EventID = "FINALIZER BREAK EVEN GAS PRICE BIG DIFFERENCE" - + // EventID_SynchonizerRestart is triggered when the Synchonizer restarts + EventID_SynchonizerRestart EventID = "SYNCHRONIZER RESTART" // Source_Node is the source of the event Source_Node Source = "node" diff --git a/event/eventlog.go b/event/eventlog.go index 4c1f0ce0a9..a7a0ca91b4 100644 --- a/event/eventlog.go +++ b/event/eventlog.go @@ -6,7 +6,7 @@ import ( "time" 
"github.com/0xPolygonHermez/zkevm-node/log" - "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" + "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" ) // EventLog is the main struct for the event log @@ -29,7 +29,7 @@ func (e *EventLog) LogEvent(ctx context.Context, event *Event) error { } // LogExecutorError is used to store Executor error for runtime debugging -func (e *EventLog) LogExecutorError(ctx context.Context, responseError pb.ExecutorError, processBatchRequest *pb.ProcessBatchRequest) { +func (e *EventLog) LogExecutorError(ctx context.Context, responseError executor.ExecutorError, processBatchRequest *executor.ProcessBatchRequest) { timestamp := time.Now() log.Errorf("error found in the executor: %v at %v", responseError, timestamp) payload, err := json.Marshal(processBatchRequest) diff --git a/gasprice/config.go b/gasprice/config.go index 67f2a45096..6e0426ae40 100644 --- a/gasprice/config.go +++ b/gasprice/config.go @@ -22,7 +22,10 @@ const ( type Config struct { Type EstimatorType `mapstructure:"Type"` - DefaultGasPriceWei uint64 `mapstructure:"DefaultGasPriceWei"` + // DefaultGasPriceWei is used to set the gas price to be used by the default gas pricer or as minimim gas price by the follower gas pricer. + DefaultGasPriceWei uint64 `mapstructure:"DefaultGasPriceWei"` + // MaxGasPriceWei is used to limit the gas price returned by the follower gas pricer to a maximum value. It is ignored if 0. 
+ MaxGasPriceWei uint64 `mapstructure:"MaxGasPriceWei"` MaxPrice *big.Int `mapstructure:"MaxPrice"` IgnorePrice *big.Int `mapstructure:"IgnorePrice"` CheckBlocks int `mapstructure:"CheckBlocks"` diff --git a/gasprice/follower.go b/gasprice/follower.go index 8bfc4f0fc7..fa5594336a 100644 --- a/gasprice/follower.go +++ b/gasprice/follower.go @@ -49,9 +49,14 @@ func (f *FollowerGasPrice) UpdateGasPriceAvg() { res.Int(result) minGasPrice := big.NewInt(0).SetUint64(f.cfg.DefaultGasPriceWei) if minGasPrice.Cmp(result) == 1 { // minGasPrice > result - log.Warn("setting minGasPrice for L2") + log.Warn("setting DefaultGasPriceWei for L2") result = minGasPrice } + maxGasPrice := new(big.Int).SetUint64(f.cfg.MaxGasPriceWei) + if f.cfg.MaxGasPriceWei > 0 && result.Cmp(maxGasPrice) == 1 { // result > maxGasPrice + log.Warn("setting MaxGasPriceWei for L2") + result = maxGasPrice + } var truncateValue *big.Int log.Debug("Full L2 gas price value: ", result, ". Length: ", len(result.String())) numLength := len(result.String()) diff --git a/gasprice/follower_test.go b/gasprice/follower_test.go index bd858ca5ae..9bd1e7d129 100644 --- a/gasprice/follower_test.go +++ b/gasprice/follower_test.go @@ -39,3 +39,24 @@ func TestUpdateGasPriceFollower(t *testing.T) { poolM.On("SetGasPrices", ctx, l2GasPrice, l1GasPrice.Uint64()).Return(nil).Once() f.UpdateGasPriceAvg() } + +func TestLimitMasGasPrice(t *testing.T) { + ctx := context.Background() + var d time.Duration = 1000000000 + + cfg := Config{ + Type: FollowerType, + DefaultGasPriceWei: 100000000, + MaxGasPriceWei: 50000000, + UpdatePeriod: types.NewDuration(d), + Factor: 0.5, + } + l1GasPrice := big.NewInt(1000000000) + poolM := new(poolMock) + ethM := new(ethermanMock) + ethM.On("GetL1GasPrice", ctx).Return(l1GasPrice) + // Ensure SetGasPrices is called with the MaxGasPriceWei + poolM.On("SetGasPrices", ctx, cfg.MaxGasPriceWei, l1GasPrice.Uint64()).Return(nil) + f := newFollowerGasPriceSuggester(ctx, cfg, poolM, ethM) + 
f.UpdateGasPriceAvg() +} diff --git a/go.mod b/go.mod index 12d17e71e9..6602d764be 100644 --- a/go.mod +++ b/go.mod @@ -44,6 +44,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/chigopher/pathlib v0.15.0 // indirect github.com/cloudflare/circl v1.3.3 // indirect github.com/cockroachdb/errors v1.9.1 // indirect github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect @@ -75,6 +76,10 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/holiman/bloomfilter/v2 v2.0.3 // indirect github.com/huin/goupnp v1.0.3 // indirect + github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 // indirect + github.com/iancoleman/strcase v0.2.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/invopop/jsonschema v0.7.0 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect github.com/jackc/pgio v1.0.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect @@ -84,6 +89,7 @@ require ( github.com/jackc/puddle v1.3.0 // indirect github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect + github.com/jinzhu/copier v0.3.5 // indirect github.com/karrick/godirwalk v1.17.0 // indirect github.com/kevinburke/ssh_config v1.2.0 // indirect github.com/klauspost/compress v1.15.15 // indirect @@ -94,8 +100,11 @@ require ( github.com/markbates/errx v1.1.0 // indirect github.com/markbates/oncer v1.0.0 // indirect github.com/markbates/safe v1.0.1 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect github.com/mattn/go-runewidth v0.0.9 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/pelletier/go-toml/v2 v2.0.8 // indirect 
github.com/pjbgf/sha1cd v0.3.0 // indirect @@ -103,12 +112,14 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/procfs v0.10.1 // indirect github.com/rogpeppe/go-internal v1.10.0 // indirect + github.com/rs/zerolog v1.29.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sergi/go-diff v1.2.0 // indirect github.com/shirou/gopsutil v3.21.4-0.20210419000835-c7a38de76ee5+incompatible // indirect github.com/sirupsen/logrus v1.9.0 // indirect github.com/skeema/knownhosts v1.1.1 // indirect github.com/spf13/cast v1.5.1 // indirect + github.com/spf13/cobra v1.6.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/status-im/keycard-go v0.2.0 // indirect @@ -119,15 +130,18 @@ require ( github.com/tyler-smith/go-bip39 v1.1.0 // indirect github.com/umbracle/fastrlp v0.0.0-20220527094140-59d5dd30e722 // indirect github.com/valyala/fastjson v1.4.1 // indirect + github.com/vektra/mockery/v2 v2.32.0 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect + golang.org/x/mod v0.9.0 // indirect golang.org/x/sys v0.10.0 // indirect golang.org/x/term v0.10.0 // indirect golang.org/x/text v0.11.0 // indirect golang.org/x/time v0.1.0 // indirect + golang.org/x/tools v0.7.0 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect diff --git a/go.sum b/go.sum index 9237518b9a..ca7c09ed90 100644 --- a/go.sum +++ b/go.sum @@ -75,6 +75,7 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 
h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= @@ -90,6 +91,8 @@ github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chigopher/pathlib v0.15.0 h1:1pg96WL3iC1/YyWV4UJSl3E0GBf4B+h5amBtsbAAieY= +github.com/chigopher/pathlib v0.15.0/go.mod h1:3+YPPV21mU9vyw8Mjp+F33CyCfE6iOzinpiqBcccv7I= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -113,6 +116,7 @@ github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 h1:ytcWPaNPhNoG github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811/go.mod h1:Nb5lgvnQ2+oGlE/EyZy4+2/CxRh9KfvCXnag1vtpxVM= github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= github.com/cockroachdb/redact v1.1.3/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ= 
github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/containerd/continuity v0.0.0-20191214063359-1097c8bae83b h1:pik3LX++5O3UiNWv45wfP/WT81l7ukBJzd3uUiifbSU= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -122,6 +126,7 @@ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= @@ -134,6 +139,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/deckarep/golang-set/v2 v2.1.0 h1:g47V4Or+DUdzbs8FxCCmgb6VYd+ptPAngjM6dtGktsI= github.com/deckarep/golang-set/v2 v2.1.0/go.mod h1:VAky9rY/yGXJOLEDv3OMci+7wtDpOF4IN+y82NBOac4= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= @@ -202,6 +208,7 @@ github.com/go-gorp/gorp/v3 v3.1.0 
h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= @@ -362,6 +369,10 @@ github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ= github.com/huin/goupnp v1.0.3/go.mod h1:ZxNlw5WqJj6wSsRK5+YfflQGXYfccj5VgQsMNixHM7Y= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= +github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0 h1:i462o439ZjprVSFSZLZxcsoAe592sZB1rci2Z8j4wdk= +github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= +github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/iden3/go-iden3-crypto v0.0.15 h1:4MJYlrot1l31Fzlo2sF56u7EVFeHHJkxGXXZCtESgK4= @@ -370,6 +381,11 @@ github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= github.com/imdario/mergo 
v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/invopop/jsonschema v0.7.0 h1:2vgQcBz1n256N+FpX3Jq7Y17AjYt46Ig3zIWyy770So= +github.com/invopop/jsonschema v0.7.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0= github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk= @@ -429,14 +445,19 @@ github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7Bd github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= +github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI= @@ -497,7 +518,9 @@ github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVc github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= @@ -505,7 +528,9 @@ github.com/mattn/go-isatty v0.0.8/go.mod 
h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= @@ -517,6 +542,7 @@ github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/le github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= @@ -531,7 +557,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= @@ -596,8 +624,13 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w= +github.com/rs/zerolog v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= +github.com/rubenv/sql-migrate v1.5.1 h1:WsZo4jPQfjmddDTh/suANP2aKPA7/ekN0LzuuajgQEo= +github.com/rubenv/sql-migrate v1.5.1/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is= github.com/rubenv/sql-migrate v1.5.2 h1:bMDqOnrJVV/6JQgQ/MxOpU+AdO8uzYYA/TxFUBzFtS0= github.com/rubenv/sql-migrate v1.5.2/go.mod h1:H38GW8Vqf8F0Su5XignRyaRcbXbJunSWxs+kmzlg0Is= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= @@ -640,6 +673,8 @@ github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod 
h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= +github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= @@ -660,6 +695,7 @@ github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -701,6 +737,8 @@ github.com/valyala/fastjson v1.4.1/go.mod h1:nV6MsjxL2IMJQUoHDIrjEI7oLyeqK6aBD7E github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/vektra/mockery/v2 v2.32.0 h1:IXUoQ3s5VxJPpi95DECUmkRUXZ44I1spQ3YatEypIF4= +github.com/vektra/mockery/v2 v2.32.0/go.mod 
h1:9lREs4VEeQiUS3rizYQx1saxHu2JiIhThP0q9+fDegM= github.com/vmihailenco/bufpool v0.1.11 h1:gOq2WmBrq0i2yW5QJ16ykccQ4wH9UyEsgLm6czKAd94= github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= @@ -743,6 +781,7 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -819,6 +858,8 @@ golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -972,6 +1013,7 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1070,6 +1112,8 @@ golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/jsonrpc/endpoints_eth.go b/jsonrpc/endpoints_eth.go index 6f98e3e8b0..523202f364 100644 --- a/jsonrpc/endpoints_eth.go +++ b/jsonrpc/endpoints_eth.go @@ -565,9 +565,11 @@ func (e *EthEndpoints) GetTransactionByHash(hash types.ArgHash) (interface{}, ty } else if err != nil { return RPCErrorResponse(types.DefaultErrorCode, "failed to load transaction by hash from pool", err) } - tx = &poolTx.Transaction - - return types.NewTransaction(*tx, nil, nil, nil), nil + if poolTx.Status == pool.TxStatusPending { + tx = 
&poolTx.Transaction + return types.NewTransaction(*tx, nil, nil, nil), nil + } + return nil, nil }) } @@ -801,6 +803,10 @@ func (e *EthEndpoints) SendRawTransaction(httpRequest *http.Request, input strin ip := "" ips := httpRequest.Header.Get("X-Forwarded-For") + // TODO: this is temporary patch remove this log + realIp := httpRequest.Header.Get("X-Real-IP") + log.Infof("X-Forwarded-For: %s, X-Real-IP: %s", ips, realIp) + if ips != "" { ip = strings.Split(ips, ",")[0] } diff --git a/jsonrpc/endpoints_eth_test.go b/jsonrpc/endpoints_eth_test.go index f75d974151..9cddb79737 100644 --- a/jsonrpc/endpoints_eth_test.go +++ b/jsonrpc/endpoints_eth_test.go @@ -2382,7 +2382,7 @@ func TestGetTransactionByHash(t *testing.T) { m.Pool. On("GetTxByHash", context.Background(), tc.Hash). - Return(&pool.Transaction{Transaction: *tc.ExpectedResult}, nil). + Return(&pool.Transaction{Transaction: *tc.ExpectedResult, Status: pool.TxStatusPending}, nil). Once() }, }, diff --git a/jsonrpc/types/types.go b/jsonrpc/types/types.go index 461301ada0..515a9a151d 100644 --- a/jsonrpc/types/types.go +++ b/jsonrpc/types/types.go @@ -518,6 +518,7 @@ type Receipt struct { ToAddr *common.Address `json:"to"` ContractAddress *common.Address `json:"contractAddress"` Type ArgUint64 `json:"type"` + EffectiveGasPrice *ArgBig `json:"effectiveGasPrice,omitempty"` } // NewReceipt creates a new Receipt instance @@ -543,8 +544,7 @@ func NewReceipt(tx types.Transaction, r *types.Receipt) (Receipt, error) { if err != nil { return Receipt{}, err } - - return Receipt{ + receipt := Receipt{ Root: common.BytesToHash(r.PostState), CumulativeGasUsed: ArgUint64(r.CumulativeGasUsed), LogsBloom: r.Bloom, @@ -559,7 +559,12 @@ func NewReceipt(tx types.Transaction, r *types.Receipt) (Receipt, error) { FromAddr: from, ToAddr: to, Type: ArgUint64(r.Type), - }, nil + } + if r.EffectiveGasPrice != nil { + egp := ArgBig(*r.EffectiveGasPrice) + receipt.EffectiveGasPrice = &egp + } + return receipt, nil } // Log structure diff 
--git a/log/config.go b/log/config.go index 7cee754ad0..2f166ee92c 100644 --- a/log/config.go +++ b/log/config.go @@ -3,9 +3,11 @@ package log // Config for log type Config struct { // Environment defining the log format ("production" or "development"). - Environment LogEnvironment `mapstructure:"Environment"` - // Level of log, e.g. INFO, WARN, ... - Level string `mapstructure:"Level"` + // In development mode enables development mode (which makes DPanicLevel logs panic), uses a console encoder, writes to standard error, and disables sampling. Stacktraces are automatically included on logs of WarnLevel and above. + // Check [here](https://pkg.go.dev/go.uber.org/zap@v1.24.0#NewDevelopmentConfig) + Environment LogEnvironment `mapstructure:"Environment" jsonschema:"enum=production,enum=development"` + // Level of log. As lower value more logs are going to be generated + Level string `mapstructure:"Level" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=dpanic,enum=panic,enum=fatal"` // Outputs Outputs []string `mapstructure:"Outputs"` } diff --git a/metrics/config.go b/metrics/config.go index 742e54aae5..9fe7ef625f 100644 --- a/metrics/config.go +++ b/metrics/config.go @@ -2,10 +2,16 @@ package metrics // Config represents the configuration of the metrics type Config struct { - Host string `mapstructure:"Host"` - Port int `mapstructure:"Port"` - Enabled bool `mapstructure:"Enabled"` - ProfilingHost string `mapstructure:"ProfilingHost"` - ProfilingPort int `mapstructure:"ProfilingPort"` - ProfilingEnabled bool `mapstructure:"ProfilingEnabled"` + // Host is the address to bind the metrics server + Host string `mapstructure:"Host"` + // Port is the port to bind the metrics server + Port int `mapstructure:"Port"` + // Enabled is the flag to enable/disable the metrics server + Enabled bool `mapstructure:"Enabled"` + // ProfilingHost is the address to bind the profiling server + ProfilingHost string `mapstructure:"ProfilingHost"` + // ProfilingPort is the port to 
bind the profiling server + ProfilingPort int `mapstructure:"ProfilingPort"` + // ProfilingEnabled is the flag to enable/disable the profiling server + ProfilingEnabled bool `mapstructure:"ProfilingEnabled"` } diff --git a/pool/config.go b/pool/config.go index 443795c597..c744fd6c82 100644 --- a/pool/config.go +++ b/pool/config.go @@ -32,24 +32,9 @@ type Config struct { // PollMinAllowedGasPriceInterval is the interval to poll the suggested min gas price for a tx PollMinAllowedGasPriceInterval types.Duration `mapstructure:"PollMinAllowedGasPriceInterval"` - // EffectiveGasPrice is the configuration for the break even and effective gas price calculation - EffectiveGasPrice EffectiveGasPrice `mapstructure:"EffectiveGasPrice"` - // AccountQueue represents the maximum number of non-executable transaction slots permitted per account AccountQueue uint64 `mapstructure:"AccountQueue"` // GlobalQueue represents the maximum number of non-executable transaction slots for all accounts GlobalQueue uint64 `mapstructure:"GlobalQueue"` } - -// EffectiveGasPrice has parameters for the effective gas price calculation. -type EffectiveGasPrice struct { - // L1GasPriceFactor is the percentage of the L1 gas price that will be used as the L2 min gas price - L1GasPriceFactor float64 `mapstructure:"L1GasPriceFactor"` - - // ByteGasCost is the gas cost per byte - ByteGasCost uint64 `mapstructure:"ByteGasCost"` - - // MarginFactor is the margin factor percentage to be added to the L2 min gas price - MarginFactor float64 `mapstructure:"MarginFactor"` -} diff --git a/pool/errors.go b/pool/errors.go index 291a287254..14bcb8b08e 100644 --- a/pool/errors.go +++ b/pool/errors.go @@ -64,7 +64,4 @@ var ( // ErrGasPrice is returned if the transaction has specified lower gas price than the minimum allowed. ErrGasPrice = errors.New("gas price too low") - - // ErrReceivedZeroL1GasPrice is returned if the L1 gas price is 0. 
- ErrReceivedZeroL1GasPrice = errors.New("received L1 gas price 0") ) diff --git a/pool/pgpoolstorage/pgpoolstorage.go b/pool/pgpoolstorage/pgpoolstorage.go index e35385fcf2..51d5aab1ba 100644 --- a/pool/pgpoolstorage/pgpoolstorage.go +++ b/pool/pgpoolstorage/pgpoolstorage.go @@ -73,10 +73,11 @@ func (p *PostgresPoolStorage) AddTx(ctx context.Context, tx pool.Transaction) er received_at, from_address, is_wip, - ip + ip, + failed_reason ) VALUES - ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18) + ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, NULL) ON CONFLICT (hash) DO UPDATE SET encoded = $2, decoded = $3, @@ -94,7 +95,8 @@ func (p *PostgresPoolStorage) AddTx(ctx context.Context, tx pool.Transaction) er received_at = $15, from_address = $16, is_wip = $17, - ip = $18 + ip = $18, + failed_reason = NULL ` // Get FromAddress from the JSON data diff --git a/pool/pool.go b/pool/pool.go index a1730fde14..7a0ca6a1f1 100644 --- a/pool/pool.go +++ b/pool/pool.go @@ -31,13 +31,6 @@ var ( ErrReplaceUnderpriced = errors.New("replacement transaction underpriced") ) -const ( - // constants used in calculation of BreakEvenGasPrice - signatureBytesLength = 65 - effectivePercentageBytesLength = 1 - totalRlpFieldsLength = signatureBytesLength + effectivePercentageBytesLength -) - // Pool is an implementation of the Pool interface // that uses a postgres database to store the data type Pool struct { @@ -55,11 +48,12 @@ type Pool struct { } type preExecutionResponse struct { - usedZkCounters state.ZKCounters - isOOC bool - isOOG bool - isReverted bool - txResponse *state.ProcessTransactionResponse + usedZkCounters state.ZKCounters + isExecutorLevelError bool + isOOC bool + isOOG bool + isReverted bool + txResponse *state.ProcessTransactionResponse } // GasPrices contains the gas prices for L2 and L1 @@ -177,6 +171,9 @@ func (p *Pool) StoreTx(ctx context.Context, tx types.Transaction, ip string, isW 
preExecutionResponse, err := p.preExecuteTx(ctx, tx) if errors.Is(err, runtime.ErrIntrinsicInvalidBatchGasLimit) { return ErrGasLimit + } else if preExecutionResponse.isExecutorLevelError { + // Do not add tx to the pool + return err } else if err != nil { log.Debugf("PreExecuteTx error (this can be ignored): %v", err) } @@ -234,6 +231,7 @@ func (p *Pool) preExecuteTx(ctx context.Context, tx types.Transaction) (preExecu if processBatchResponse.Responses != nil && len(processBatchResponse.Responses) > 0 { errorToCheck := processBatchResponse.Responses[0].RomError response.isReverted = errors.Is(errorToCheck, runtime.ErrExecutionReverted) + response.isExecutorLevelError = processBatchResponse.IsExecutorLevelError response.isOOC = executor.IsROMOutOfCountersError(executor.RomErrorCode(errorToCheck)) response.isOOG = errors.Is(errorToCheck, runtime.ErrOutOfGas) response.usedZkCounters = processBatchResponse.UsedZkCounters @@ -528,31 +526,18 @@ func (p *Pool) UpdateTxWIPStatus(ctx context.Context, hash common.Hash, isWIP bo return p.storage.UpdateTxWIPStatus(ctx, hash, isWIP) } -// CalculateTxBreakEvenGasPrice calculates the break even gas price for a transaction -func (p *Pool) CalculateTxBreakEvenGasPrice(ctx context.Context, txDataLength uint64, gasUsed uint64, l1GasPrice uint64) (*big.Int, error) { +// GetDefaultMinGasPriceAllowed return the configured DefaultMinGasPriceAllowed value +func (p *Pool) GetDefaultMinGasPriceAllowed() uint64 { + return p.cfg.DefaultMinGasPriceAllowed +} + +// GetL1GasPrice returns the L1 gas price +func (p *Pool) GetL1GasPrice() uint64 { p.gasPricesMux.RLock() gasPrices := p.gasPrices p.gasPricesMux.RUnlock() - // We enforce l1GasPrice to make it consistent during the lifespan of the transaction - gasPrices.L1GasPrice = l1GasPrice - - if gasPrices.L1GasPrice == 0 { - log.Warn("Received L1 gas price 0. 
Skipping estimation...") - return big.NewInt(0), ErrReceivedZeroL1GasPrice - } - - // Get L2 Min Gas Price - l2MinGasPrice := uint64(float64(gasPrices.L1GasPrice) * p.cfg.EffectiveGasPrice.L1GasPriceFactor) - if l2MinGasPrice < p.cfg.DefaultMinGasPriceAllowed { - l2MinGasPrice = p.cfg.DefaultMinGasPriceAllowed - } - - // Calculate break even gas price - totalTxPrice := (gasUsed * l2MinGasPrice) + (totalRlpFieldsLength * txDataLength * gasPrices.L1GasPrice) - breakEvenGasPrice := uint64(float64(totalTxPrice/gasUsed) * p.cfg.EffectiveGasPrice.MarginFactor) - - return big.NewInt(0).SetUint64(breakEvenGasPrice), nil + return gasPrices.L1GasPrice } const ( diff --git a/pool/pool_test.go b/pool/pool_test.go index 8112a87ba0..39751375ef 100644 --- a/pool/pool_test.go +++ b/pool/pool_test.go @@ -63,13 +63,8 @@ var ( DefaultMinGasPriceAllowed: 1000000000, IntervalToRefreshBlockedAddresses: cfgTypes.NewDuration(5 * time.Minute), IntervalToRefreshGasPrices: cfgTypes.NewDuration(5 * time.Second), - EffectiveGasPrice: pool.EffectiveGasPrice{ - L1GasPriceFactor: 10, - ByteGasCost: 16, - MarginFactor: 10, - }, - AccountQueue: 15, - GlobalQueue: 20, + AccountQueue: 15, + GlobalQueue: 20, } gasPrice = big.NewInt(1000000000) l1GasPrice = big.NewInt(1000000000000) @@ -1383,13 +1378,8 @@ func Test_BlockedAddress(t *testing.T) { DefaultMinGasPriceAllowed: 1000000000, IntervalToRefreshBlockedAddresses: cfgTypes.NewDuration(5 * time.Second), IntervalToRefreshGasPrices: cfgTypes.NewDuration(5 * time.Second), - EffectiveGasPrice: pool.EffectiveGasPrice{ - L1GasPriceFactor: 10, - ByteGasCost: 16, - MarginFactor: 10, - }, - AccountQueue: 64, - GlobalQueue: 1024, + AccountQueue: 64, + GlobalQueue: 1024, } p := setupPool(t, cfg, s, st, chainID.Uint64(), ctx, eventLog) diff --git a/proto/src/proto/executor/v1/executor.proto b/proto/src/proto/executor/v1/executor.proto index a950426033..a195603098 100644 --- a/proto/src/proto/executor/v1/executor.proto +++ 
b/proto/src/proto/executor/v1/executor.proto @@ -4,7 +4,7 @@ import "google/protobuf/empty.proto"; package executor.v1; -option go_package = "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb"; +option go_package = "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor"; service ExecutorService { /// Processes a batch @@ -308,6 +308,8 @@ enum RomError { ROM_ERROR_BATCH_DATA_TOO_BIG = 27; // ROM_ERROR_UNSUPPORTED_FORK_ID indicates that the fork id is not supported ROM_ERROR_UNSUPPORTED_FORK_ID = 28; + // ROM_ERROR_INVALID_RLP indicates that there has been an error while parsing the RLP + ROM_ERROR_INVALID_RLP = 29; } enum ExecutorError { diff --git a/sequencer/addrqueue.go b/sequencer/addrqueue.go index 84d5e4e90d..684a060aed 100644 --- a/sequencer/addrqueue.go +++ b/sequencer/addrqueue.go @@ -32,31 +32,48 @@ func newAddrQueue(addr common.Address, nonce uint64, balance *big.Int) *addrQueu } } -// addTx adds a tx to the addrQueue and updates the ready a notReady Txs +// addTx adds a tx to the addrQueue and updates the ready and notReady Txs. Also if the new tx matches +// an existing tx with the same nonce but the new tx has better or equal gasPrice, we will return in the replacedTx +// the existing tx with lower gasPrice (the replacedTx will be later set as failed in the pool).
+// If the existing tx has better gasPrice then we will drop the new tx (dropReason = ErrDuplicatedNonce) +func (a *addrQueue) addTx(tx *TxTracker) (newReadyTx, prevReadyTx, replacedTx *TxTracker, dropReason error) { + var repTx *TxTracker + if a.currentNonce == tx.Nonce { // Is a possible readyTx // We set the tx as readyTx if we do not have one assigned or if the gasPrice is better or equal than the current readyTx if a.readyTx == nil || ((a.readyTx != nil) && (tx.GasPrice.Cmp(a.readyTx.GasPrice) >= 0)) { oldReadyTx := a.readyTx - if a.currentBalance.Cmp(tx.Cost) >= 0 { // + if (oldReadyTx != nil) && (oldReadyTx.HashStr != tx.HashStr) { + // if it is a different tx then we need to return the replaced tx to set as failed in the pool + repTx = oldReadyTx + } + if a.currentBalance.Cmp(tx.Cost) >= 0 { a.readyTx = tx - return tx, oldReadyTx, nil + return tx, oldReadyTx, repTx, nil } else { // If there is not enough balance we set the new tx as notReadyTxs a.readyTx = nil a.notReadyTxs[tx.Nonce] = tx - return nil, oldReadyTx, nil + return nil, oldReadyTx, repTx, nil } + } else { // We have an already readytx with the same nonce and better gas price, we discard the new tx + return nil, nil, nil, ErrDuplicatedNonce } } else if a.currentNonce > tx.Nonce { - return nil, nil, runtime.ErrIntrinsicInvalidNonce + return nil, nil, nil, runtime.ErrIntrinsicInvalidNonce } nrTx, found := a.notReadyTxs[tx.Nonce] if !found || ((found) && (tx.GasPrice.Cmp(nrTx.GasPrice) >= 0)) { a.notReadyTxs[tx.Nonce] = tx + if (found) && (nrTx.HashStr != tx.HashStr) { + // if it is a different tx then we need to return the replaced tx to set as failed in the pool + repTx = nrTx + } + return nil, nil, repTx, nil + } else { + // We have an already notReadytx with the same nonce and better gas price, we discard the new tx + return nil, nil, nil, ErrDuplicatedNonce } - - return nil, nil, nil } // ExpireTransactions removes the txs that have been in the queue for more than maxTime diff --git 
a/sequencer/addrqueue_test.go b/sequencer/addrqueue_test.go index 2926af696d..d39ce5a356 100644 --- a/sequencer/addrqueue_test.go +++ b/sequencer/addrqueue_test.go @@ -20,6 +20,8 @@ type addrQueueAddTxTestCase struct { cost *big.Int expectedReadyTx common.Hash expectedNotReadyTx []notReadyTx + expectedReplacedTx common.Hash + err error } var addr addrQueue @@ -35,10 +37,10 @@ func processAddTxTestCases(t *testing.T, testCases []addrQueueAddTxTestCase) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { tx := newTestTxTracker(tc.hash, tc.nonce, tc.gasPrice, tc.cost) - newReadyTx, _, _ := addr.addTx(tx) + newReadyTx, _, replacedTx, err := addr.addTx(tx) if tc.expectedReadyTx.String() == emptyHash.String() { if !(addr.readyTx == nil) { - t.Fatalf("Error readyTx. Expected=%s, Actual=%s", tc.expectedReadyTx, "") + t.Fatalf("Error readyTx. Expected=nil, Actual=%s", addr.readyTx.HashStr) } if !(newReadyTx == nil) { t.Fatalf("Error newReadyTx. Expected=nil, Actual=%s", newReadyTx.HashStr) @@ -58,6 +60,30 @@ func processAddTxTestCases(t *testing.T, testCases []addrQueueAddTxTestCase) { t.Fatalf("Error notReadyTx nonce=%d. Expected=%s, Actual=%s", nr.nonce, nr.hash.String(), txTmp.HashStr) } } + + if tc.expectedReplacedTx.String() == emptyHash.String() { + if !(replacedTx == nil) { + t.Fatalf("Error replacedTx. Expected=%s, Actual=%s", tc.expectedReplacedTx, replacedTx.HashStr) + } + } else { + if (replacedTx == nil) || ((replacedTx != nil) && !(replacedTx.Hash == tc.expectedReplacedTx)) { + replacedTxStr := "nil" + if replacedTx != nil { + replacedTxStr = replacedTx.HashStr + } + t.Fatalf("Error replacedTx. Expected=%s, Actual=%s", tc.expectedReplacedTx, replacedTxStr) + } + } + + if tc.err == nil { + if err != nil { + t.Fatalf("Error returned error. Expected=nil, Actual=%s", err) + } + } else { + if tc.err != err { + t.Fatalf("Error returned error. 
Expected=%s, Actual=%s", tc.err, err) + } + } }) } } @@ -67,22 +93,37 @@ func TestAddrQueue(t *testing.T) { addTxTestCases := []addrQueueAddTxTestCase{ { - name: "Add not ready tx 0x02", hash: common.Hash{0x2}, nonce: 2, gasPrice: new(big.Int).SetInt64(2), cost: new(big.Int).SetInt64(5), + name: "Add not ready tx 0x02 nonce 2", hash: common.Hash{0x2}, nonce: 2, gasPrice: new(big.Int).SetInt64(2), cost: new(big.Int).SetInt64(5), expectedReadyTx: common.Hash{}, expectedNotReadyTx: []notReadyTx{ {nonce: 2, hash: common.Hash{0x2}}, }, }, { - name: "Add ready tx 0x01", hash: common.Hash{0x1}, nonce: 1, gasPrice: new(big.Int).SetInt64(2), cost: new(big.Int).SetInt64(5), - expectedReadyTx: common.Hash{1}, + name: "Add ready tx 0x011 nonce 1", hash: common.Hash{0x11}, nonce: 1, gasPrice: new(big.Int).SetInt64(5), cost: new(big.Int).SetInt64(5), + expectedReadyTx: common.Hash{0x11}, + expectedNotReadyTx: []notReadyTx{ + {nonce: 2, hash: common.Hash{0x2}}, + }, + }, + { + name: "Replace readyTx 0x11 by tx 0x1 with best gasPrice", hash: common.Hash{0x1}, nonce: 1, gasPrice: new(big.Int).SetInt64(6), cost: new(big.Int).SetInt64(5), + expectedReadyTx: common.Hash{0x1}, + expectedNotReadyTx: []notReadyTx{ + {nonce: 2, hash: common.Hash{0x2}}, + }, + expectedReplacedTx: common.Hash{0x11}, + }, + { + name: "Replace readyTx for the same tx 0x1 with best gasPrice", hash: common.Hash{0x1}, nonce: 1, gasPrice: new(big.Int).SetInt64(8), cost: new(big.Int).SetInt64(5), + expectedReadyTx: common.Hash{0x1}, expectedNotReadyTx: []notReadyTx{ {nonce: 2, hash: common.Hash{0x2}}, }, }, { - name: "Add not ready tx 0x04", hash: common.Hash{0x4}, nonce: 4, gasPrice: new(big.Int).SetInt64(2), cost: new(big.Int).SetInt64(5), - expectedReadyTx: common.Hash{1}, + name: "Add not ready tx 0x04 nonce 4", hash: common.Hash{0x4}, nonce: 4, gasPrice: new(big.Int).SetInt64(2), cost: new(big.Int).SetInt64(5), + expectedReadyTx: common.Hash{0x1}, expectedNotReadyTx: []notReadyTx{ {nonce: 2, hash: 
common.Hash{0x2}}, {nonce: 4, hash: common.Hash{0x4}}, @@ -90,18 +131,38 @@ func TestAddrQueue(t *testing.T) { }, { name: "Replace tx with nonce 4 for tx 0x44 with best GasPrice", hash: common.Hash{0x44}, nonce: 4, gasPrice: new(big.Int).SetInt64(3), cost: new(big.Int).SetInt64(5), - expectedReadyTx: common.Hash{1}, + expectedReadyTx: common.Hash{0x1}, + expectedNotReadyTx: []notReadyTx{ + {nonce: 2, hash: common.Hash{0x2}}, + {nonce: 4, hash: common.Hash{0x44}}, + }, + expectedReplacedTx: common.Hash{0x4}, + }, + { + name: "Replace tx with nonce 4 for the same tx 0x44 with best GasPrice", hash: common.Hash{0x44}, nonce: 4, gasPrice: new(big.Int).SetInt64(4), cost: new(big.Int).SetInt64(5), + expectedReadyTx: common.Hash{0x1}, + expectedNotReadyTx: []notReadyTx{ + {nonce: 2, hash: common.Hash{0x2}}, + {nonce: 4, hash: common.Hash{0x44}}, + }, + expectedReplacedTx: common.Hash{}, + }, + { + name: "Add tx 0x22 with nonce 2 with lower GasPrice than 0x2", hash: common.Hash{0x22}, nonce: 2, gasPrice: new(big.Int).SetInt64(1), cost: new(big.Int).SetInt64(5), + expectedReadyTx: common.Hash{0x1}, expectedNotReadyTx: []notReadyTx{ {nonce: 2, hash: common.Hash{0x2}}, {nonce: 4, hash: common.Hash{0x44}}, }, + expectedReplacedTx: common.Hash{}, + err: ErrDuplicatedNonce, }, } processAddTxTestCases(t, addTxTestCases) t.Run("Delete readyTx 0x01", func(t *testing.T) { - tc := addTxTestCases[1] + tc := addTxTestCases[2] tx := newTestTxTracker(tc.hash, tc.nonce, tc.gasPrice, tc.cost) deltx := addr.deleteTx(tx.Hash) if !(addr.readyTx == nil) { diff --git a/sequencer/closingsignalsmanager_test.go b/sequencer/closingsignalsmanager_test.go index 763e76b560..cb09f40e34 100644 --- a/sequencer/closingsignalsmanager_test.go +++ b/sequencer/closingsignalsmanager_test.go @@ -14,7 +14,6 @@ import ( mtDBclientpb "github.com/0xPolygonHermez/zkevm-node/merkletree/pb" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" - executorclientpb 
"github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" "github.com/0xPolygonHermez/zkevm-node/test/dbutils" "github.com/0xPolygonHermez/zkevm-node/test/testutils" "github.com/ethereum/go-ethereum/common" @@ -34,7 +33,7 @@ var ( localMtDBServiceClient mtDBclientpb.HashDBServiceClient localMtDBClientConn, localExecutorClientConn *grpc.ClientConn localState *state.State - localExecutorClient executorclientpb.ExecutorServiceClient + localExecutorClient executor.ExecutorServiceClient testGER = common.HexToAddress("0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D") testAddr = common.HexToAddress("0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D") testRawData = common.Hex2Bytes("0xee80843b9aca00830186a0944d5cf5032b2a844602278b01199ed191a86c93ff88016345785d8a0000808203e880801cee7e01dc62f69a12c3510c6d64de04ee6346d84b6a017f3e786c7d87f963e75d8cc91fa983cd6d9cf55fff80d73bd26cd333b0f098acc1e58edb1fd484ad731b") diff --git a/sequencer/config.go b/sequencer/config.go index eb0a121697..9b99af333b 100644 --- a/sequencer/config.go +++ b/sequencer/config.go @@ -125,8 +125,8 @@ type FinalizerCfg struct { // TimestampResolution is the resolution of the timestamp used to close a batch TimestampResolution types.Duration `mapstructure:"TimestampResolution"` - // ForkID is the fork id of the chain - ForkID uint64 `mapstructure:"ForkID"` + // StopSequencerOnBatchNum specifies the batch number where the Sequencer will stop to process more transactions and generate new batches. 
The Sequencer will halt after it closes the batch equal to this number + StopSequencerOnBatchNum uint64 `mapstructure:"StopSequencerOnBatchNum"` } // WorkerCfg contains the Worker's configuration properties @@ -139,7 +139,6 @@ type WorkerCfg struct { type DBManagerCfg struct { PoolRetrievalInterval types.Duration `mapstructure:"PoolRetrievalInterval"` L2ReorgRetrievalInterval types.Duration `mapstructure:"L2ReorgRetrievalInterval"` - ForkID uint64 `mapstructure:"ForkID"` } // EffectiveGasPriceCfg contains the configuration properties for the effective gas price @@ -147,6 +146,19 @@ type EffectiveGasPriceCfg struct { // MaxBreakEvenGasPriceDeviationPercentage is the max allowed deviation percentage BreakEvenGasPrice on re-calculation MaxBreakEvenGasPriceDeviationPercentage uint64 `mapstructure:"MaxBreakEvenGasPriceDeviationPercentage"` + // L1GasPriceFactor is the percentage of the L1 gas price that will be used as the L2 min gas price + L1GasPriceFactor float64 `mapstructure:"L1GasPriceFactor"` + + // ByteGasCost is the gas cost per byte + ByteGasCost uint64 `mapstructure:"ByteGasCost"` + + // MarginFactor is the margin factor percentage to be added to the L2 min gas price + MarginFactor float64 `mapstructure:"MarginFactor"` + // Enabled is a flag to enable/disable the effective gas price Enabled bool `mapstructure:"Enabled"` + + // DefaultMinGasPriceAllowed is the default min gas price to suggest + // This value is assigned from [Pool].DefaultMinGasPriceAllowed + DefaultMinGasPriceAllowed uint64 } diff --git a/sequencer/dbmanager.go b/sequencer/dbmanager.go index 285c23bde5..b47de338ef 100644 --- a/sequencer/dbmanager.go +++ b/sequencer/dbmanager.go @@ -139,17 +139,20 @@ func (d *dbManager) addTxToWorker(tx pool.Transaction) error { if err != nil { return err } - dropReason, isWIP := d.worker.AddTxTracker(d.ctx, txTracker) + replacedTx, dropReason := d.worker.AddTxTracker(d.ctx, txTracker) if dropReason != nil { failedReason := dropReason.Error() return 
d.txPool.UpdateTxStatus(d.ctx, txTracker.Hash, pool.TxStatusFailed, false, &failedReason) } else { - if isWIP { - return d.txPool.UpdateTxWIPStatus(d.ctx, tx.Hash(), true) + if replacedTx != nil { + failedReason := ErrReplacedTransaction.Error() + error := d.txPool.UpdateTxStatus(d.ctx, replacedTx.Hash, pool.TxStatusFailed, false, &failedReason) + if error != nil { + log.Warnf("error when setting as failed replacedTx(%s)", replacedTx.HashStr) + } } + return d.txPool.UpdateTxWIPStatus(d.ctx, tx.Hash(), true) } - - return nil } // BeginStateTransaction starts a db transaction in the state @@ -183,7 +186,8 @@ func (d *dbManager) StoreProcessedTxAndDeleteFromPool(ctx context.Context, tx tr return err } - txData, err := state.EncodeTransaction(tx.response.Tx, uint8(tx.response.EffectivePercentage), d.cfg.ForkID) + forkID := d.state.GetForkIDByBatchNumber(tx.batchNumber) + txData, err := state.EncodeTransaction(tx.response.Tx, uint8(tx.response.EffectivePercentage), forkID) if err != nil { return err } @@ -236,7 +240,8 @@ func (d *dbManager) GetWIPBatch(ctx context.Context) (*WipBatch, error) { previousLastBatch = lastBatches[1] } - lastBatchTxs, _, _, err := state.DecodeTxs(lastBatch.BatchL2Data, d.cfg.ForkID) + forkID := d.state.GetForkIDByBatchNumber(lastBatch.BatchNumber) + lastBatchTxs, _, _, err := state.DecodeTxs(lastBatch.BatchL2Data, forkID) if err != nil { return nil, err } @@ -387,7 +392,8 @@ func (d *dbManager) CloseBatch(ctx context.Context, params ClosingBatchParameter ClosingReason: params.ClosingReason, } - batchL2Data, err := state.EncodeTransactions(params.Txs, params.EffectivePercentages, d.cfg.ForkID) + forkID := d.state.GetForkIDByBatchNumber(params.BatchNumber) + batchL2Data, err := state.EncodeTransactions(params.Txs, params.EffectivePercentages, forkID) if err != nil { return err } @@ -557,9 +563,13 @@ func (d *dbManager) GetGasPrices(ctx context.Context) (pool.GasPrices, error) { return d.txPool.GetGasPrices(ctx) } -// 
CalculateTxBreakEvenGasPrice calculates the break even gas price for a transaction -func (d *dbManager) CalculateTxBreakEvenGasPrice(ctx context.Context, txDataLength uint64, gasUsed uint64, l1GasPrice uint64) (*big.Int, error) { - return d.txPool.CalculateTxBreakEvenGasPrice(ctx, txDataLength, gasUsed, l1GasPrice) +// GetDefaultMinGasPriceAllowed return the configured DefaultMinGasPriceAllowed value +func (d *dbManager) GetDefaultMinGasPriceAllowed() uint64 { + return d.txPool.GetDefaultMinGasPriceAllowed() +} + +func (d *dbManager) GetL1GasPrice() uint64 { + return d.txPool.GetL1GasPrice() } // GetStoredFlushID returns the stored flush ID and prover ID @@ -571,3 +581,8 @@ func (d *dbManager) GetStoredFlushID(ctx context.Context) (uint64, string, error func (d *dbManager) GetForcedBatch(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (*state.ForcedBatch, error) { return d.state.GetForcedBatch(ctx, forcedBatchNumber, dbTx) } + +// GetForkIDByBatchNumber returns the fork id for a given batch number +func (d *dbManager) GetForkIDByBatchNumber(batchNumber uint64) uint64 { + return d.state.GetForkIDByBatchNumber(batchNumber) +} diff --git a/sequencer/dbmanager_test.go b/sequencer/dbmanager_test.go index 0933e69fcc..647a134dbf 100644 --- a/sequencer/dbmanager_test.go +++ b/sequencer/dbmanager_test.go @@ -14,7 +14,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/merkletree" mtDBclientpb "github.com/0xPolygonHermez/zkevm-node/merkletree/pb" "github.com/0xPolygonHermez/zkevm-node/state" - executorclientpb "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" + "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" "github.com/0xPolygonHermez/zkevm-node/test/dbutils" "github.com/0xPolygonHermez/zkevm-node/test/testutils" "github.com/ethereum/go-ethereum/common" @@ -36,7 +36,7 @@ var ( ChainID: 1000, } dbManagerCfg = DBManagerCfg{PoolRetrievalInterval: types.NewDuration(500 * time.Millisecond)} - executorClient 
executorclientpb.ExecutorServiceClient + executorClient executor.ExecutorServiceClient mtDBServiceClient mtDBclientpb.HashDBServiceClient mtDBClientConn *grpc.ClientConn testDbManager *dbManager diff --git a/sequencer/effective_gas_price.go b/sequencer/effective_gas_price.go index 1ee42c6156..001c3cf80a 100644 --- a/sequencer/effective_gas_price.go +++ b/sequencer/effective_gas_price.go @@ -1,33 +1,129 @@ package sequencer import ( - "errors" + "context" + "fmt" "math/big" -) + "time" -const ( - bits = 256 + "github.com/0xPolygonHermez/zkevm-node/event" + "github.com/0xPolygonHermez/zkevm-node/log" + "github.com/0xPolygonHermez/zkevm-node/state" ) -var ( - bitsBigInt = big.NewInt(bits) - hundredPercentInBits = big.NewInt(bits - 1) +// CalculateTxBreakEvenGasPrice calculates the break even gas price for a transaction +func (f *finalizer) CalculateTxBreakEvenGasPrice(tx *TxTracker, gasUsed uint64) (*big.Int, error) { + const ( + // constants used in calculation of BreakEvenGasPrice + signatureBytesLength = 65 + effectivePercentageBytesLength = 1 + totalRlpFieldsLength = signatureBytesLength + effectivePercentageBytesLength + ) - // ErrBreakEvenGasPriceEmpty happens when the breakEven or gasPrice is nil or zero - ErrBreakEvenGasPriceEmpty = errors.New("breakEven and gasPrice cannot be nil or zero") - // ErrEffectiveGasPriceReprocess happens when the effective gas price requires reexecution - ErrEffectiveGasPriceReprocess = errors.New("effective gas price requires reprocessing the transaction") -) + if tx.L1GasPrice == 0 { + log.Warn("CalculateTxBreakEvenGasPrice: L1 gas price 0. 
Skipping estimation for tx %s", tx.HashStr) + return nil, ErrZeroL1GasPrice + } + + if gasUsed == 0 { + // Returns tx.GasPrice as the breakEvenGasPrice + return tx.GasPrice, nil + } + + // Get L2 Min Gas Price + l2MinGasPrice := uint64(float64(tx.L1GasPrice) * f.effectiveGasPriceCfg.L1GasPriceFactor) + if l2MinGasPrice < f.defaultMinGasPriceAllowed { + l2MinGasPrice = f.defaultMinGasPriceAllowed + } + + // Calculate BreakEvenGasPrice + totalTxPrice := (gasUsed * l2MinGasPrice) + ((totalRlpFieldsLength + tx.BatchResources.Bytes) * f.effectiveGasPriceCfg.ByteGasCost * tx.L1GasPrice) + breakEvenGasPrice := big.NewInt(0).SetUint64(uint64(float64(totalTxPrice/gasUsed) * f.effectiveGasPriceCfg.MarginFactor)) + + return breakEvenGasPrice, nil +} + +// CompareTxBreakEvenGasPrice calculates the newBreakEvenGasPrice with the newGasUsed and compares it with +// the tx.BreakEvenGasPrice. It returns ErrEffectiveGasPriceReprocess if the tx needs to be reprocessed with +// the tx.BreakEvenGasPrice updated, otherwise it returns nil +func (f *finalizer) CompareTxBreakEvenGasPrice(ctx context.Context, tx *TxTracker, newGasUsed uint64) error { + // Increase number of executions related to gas price + tx.EffectiveGasPriceProcessCount++ + + newBreakEvenGasPrice, err := f.CalculateTxBreakEvenGasPrice(tx, newGasUsed) + if err != nil { + log.Errorf("failed to calculate breakEvenPrice with new gasUsed for tx %s, error: %s", tx.HashStr, err.Error()) + return err + } + + // if newBreakEvenGasPrice >= tx.GasPrice then we do a final reprocess using tx.GasPrice + if newBreakEvenGasPrice.Cmp(tx.GasPrice) >= 0 { + tx.BreakEvenGasPrice = tx.GasPrice + tx.IsEffectiveGasPriceFinalExecution = true + return ErrEffectiveGasPriceReprocess + } else { //newBreakEvenGasPrice < tx.GasPrice + // Compute the absolute difference between tx.BreakEvenGasPrice - newBreakEvenGasPrice + diff := new(big.Int).Abs(new(big.Int).Sub(tx.BreakEvenGasPrice, newBreakEvenGasPrice)) + // Compute max difference allowed of
breakEvenGasPrice + maxDiff := new(big.Int).Div(new(big.Int).Mul(tx.BreakEvenGasPrice, f.maxBreakEvenGasPriceDeviationPercentage), big.NewInt(100)) //nolint:gomnd + + // if diff is greater than the maxDiff allowed + if diff.Cmp(maxDiff) == 1 { + if tx.EffectiveGasPriceProcessCount < 2 { //nolint:gomnd + // it is the first process of the tx we reprocess it with the newBreakEvenGasPrice + tx.BreakEvenGasPrice = newBreakEvenGasPrice + return ErrEffectiveGasPriceReprocess + } else { + // it is the second process attempt. It makes no sense to have a big diff at + // this point, for this reason we do a final reprocess using tx.GasPrice. + // Also we generate a critical event as this tx needs to be analyzed since + tx.BreakEvenGasPrice = tx.GasPrice + tx.IsEffectiveGasPriceFinalExecution = true + ev := &event.Event{ + ReceivedAt: time.Now(), + Source: event.Source_Node, + Component: event.Component_Sequencer, + Level: event.Level_Critical, + EventID: event.EventID_FinalizerBreakEvenGasPriceBigDifference, + Description: fmt.Sprintf("The difference: %s between the breakEvenGasPrice and the newBreakEvenGasPrice is more than %d %%", diff.String(), f.effectiveGasPriceCfg.MaxBreakEvenGasPriceDeviationPercentage), + Json: struct { + transactionHash string + preExecutionBreakEvenGasPrice string + newBreakEvenGasPrice string + diff string + deviation string + }{ + transactionHash: tx.Hash.String(), + preExecutionBreakEvenGasPrice: tx.BreakEvenGasPrice.String(), + newBreakEvenGasPrice: newBreakEvenGasPrice.String(), + diff: diff.String(), + deviation: maxDiff.String(), + }, + } + err = f.eventLog.LogEvent(ctx, ev) + if err != nil { + log.Errorf("failed to log event: %s", err.Error()) + } + return ErrEffectiveGasPriceReprocess + } + } // if the diff < maxDiff it is ok, no reprocess of the tx is needed + } + + return nil +} // CalculateEffectiveGasPricePercentage calculates the gas price's effective percentage func CalculateEffectiveGasPricePercentage(gasPrice *big.Int, breakEven
*big.Int) (uint8, error) { + const bits = 256 + var bitsBigInt = big.NewInt(bits) + if breakEven == nil || gasPrice == nil || gasPrice.Cmp(big.NewInt(0)) == 0 || breakEven.Cmp(big.NewInt(0)) == 0 { return 0, ErrBreakEvenGasPriceEmpty } if gasPrice.Cmp(breakEven) <= 0 { - return uint8(hundredPercentInBits.Uint64()), nil + return state.MaxEffectivePercentage, nil } // Simulate Ceil with integer division @@ -35,6 +131,8 @@ func CalculateEffectiveGasPricePercentage(gasPrice *big.Int, breakEven *big.Int) b = b.Add(b, gasPrice) b = b.Sub(b, big.NewInt(1)) //nolint:gomnd b = b.Div(b, gasPrice) + // At this point we have a percentage between 1-256, we need to sub 1 to have it between 0-255 (byte) + b = b.Sub(b, big.NewInt(1)) //nolint:gomnd return uint8(b.Uint64()), nil } diff --git a/sequencer/effective_gas_price_test.go b/sequencer/effective_gas_price_test.go index 053edd94d4..312faac0b8 100644 --- a/sequencer/effective_gas_price_test.go +++ b/sequencer/effective_gas_price_test.go @@ -16,35 +16,75 @@ func TestCalcGasPriceEffectivePercentage(t *testing.T) { err error }{ { - name: "Nil breakEven or gasPrice", - gasPrice: big.NewInt(1), - err: ErrBreakEvenGasPriceEmpty, + name: "Nil breakEven or gasPrice", + gasPrice: big.NewInt(1), + expectedValue: uint8(0), }, { - name: "Zero breakEven or gasPrice", - breakEven: big.NewInt(1), - gasPrice: big.NewInt(0), - err: ErrBreakEvenGasPriceEmpty, + name: "Zero breakEven or gasPrice", + breakEven: big.NewInt(1), + gasPrice: big.NewInt(0), + expectedValue: uint8(0), }, { name: "Both positive, gasPrice less than breakEven", breakEven: big.NewInt(22000000000), gasPrice: big.NewInt(11000000000), expectedValue: uint8(255), - err: nil, }, { name: "Both positive, gasPrice more than breakEven", breakEven: big.NewInt(19800000000), gasPrice: big.NewInt(22000000000), - expectedValue: uint8(231), - err: nil, + expectedValue: uint8(230), + }, + { + name: "100% (255) effective percentage 1", + gasPrice: big.NewInt(22000000000), + breakEven: 
big.NewInt(22000000000), + expectedValue: 255, + }, + { + name: "100% (255) effective percentage 2", + gasPrice: big.NewInt(22000000000), + breakEven: big.NewInt(21999999999), + expectedValue: 255, + }, + { + name: "100% (255) effective percentage 3", + gasPrice: big.NewInt(22000000000), + breakEven: big.NewInt(21900000000), + expectedValue: 254, + }, + { + name: "50% (127) effective percentage", + gasPrice: big.NewInt(22000000000), + breakEven: big.NewInt(11000000000), + expectedValue: 127, + }, + { + name: "(40) effective percentage", + gasPrice: big.NewInt(1000), + breakEven: big.NewInt(157), + expectedValue: 40, + }, + { + name: "(1) effective percentage", + gasPrice: big.NewInt(1000), + breakEven: big.NewInt(1), + expectedValue: 0, + }, + { + name: "(2) effective percentage", + gasPrice: big.NewInt(1000), + breakEven: big.NewInt(4), + expectedValue: 1, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - actual, err := CalculateEffectiveGasPricePercentage(tc.gasPrice, tc.breakEven) + actual, _ := CalculateEffectiveGasPricePercentage(tc.gasPrice, tc.breakEven) assert.Equal(t, tc.err, err) if actual != 0 { assert.Equal(t, tc.expectedValue, actual) diff --git a/sequencer/errors.go b/sequencer/errors.go index d12f1b2245..afb5efec44 100644 --- a/sequencer/errors.go +++ b/sequencer/errors.go @@ -5,4 +5,15 @@ import "errors" var ( // ErrExpiredTransaction happens when the transaction is expired ErrExpiredTransaction = errors.New("transaction expired") + // ErrBreakEvenGasPriceEmpty happens when the breakEven or gasPrice is nil or zero + ErrBreakEvenGasPriceEmpty = errors.New("breakEven and gasPrice cannot be nil or zero") + // ErrEffectiveGasPriceReprocess happens when the effective gas price requires reexecution + ErrEffectiveGasPriceReprocess = errors.New("effective gas price requires reprocessing the transaction") + // ErrZeroL1GasPrice is returned if the L1 gas price is 0. 
+ ErrZeroL1GasPrice = errors.New("L1 gas price 0") + // ErrDuplicatedNonce is returned when adding a new tx to the worker and there is an existing tx + // with the same nonce and higher gasPrice (in this case we keep the existing tx) + ErrDuplicatedNonce = errors.New("duplicated nonce") + // ErrReplacedTransaction is returned when an existing tx is replaced by a new tx with the same nonce and higher gasPrice + ErrReplacedTransaction = errors.New("replaced transaction") ) diff --git a/sequencer/finalizer.go b/sequencer/finalizer.go index 919540daa7..52311e3bf5 100644 --- a/sequencer/finalizer.go +++ b/sequencer/finalizer.go @@ -22,13 +22,13 @@ import ( ) const ( - oneHundred = 100 - pendingTxsBufferSizeMultiplier = 10 + oneHundred = 100 + pendingTxsBufferSizeMultiplier = 10 + forkId5 uint64 = 5 ) var ( - now = time.Now - maxEffectivePercentageDecodedHex, _ = hex.DecodeHex(fmt.Sprintf("%x", state.MaxEffectivePercentage)) + now = time.Now ) // finalizer represents the finalizer component of the sequencer. @@ -58,6 +58,7 @@ type finalizer struct { eventLog *event.EventLog // effective gas price calculation maxBreakEvenGasPriceDeviationPercentage *big.Int + defaultMinGasPriceAllowed uint64 // Processed txs pendingTransactionsToStore chan transactionToStore pendingTransactionsToStoreWG *sync.WaitGroup @@ -103,6 +104,7 @@ func (w *WipBatch) isEmpty() bool { func newFinalizer( cfg FinalizerCfg, effectiveGasPriceCfg EffectiveGasPriceCfg, + worker workerInterface, dbManager dbManagerInterface, executor stateInterface, @@ -151,6 +153,8 @@ func newFinalizer( // Start starts the finalizer. 
func (f *finalizer) Start(ctx context.Context, batch *WipBatch, processingReq *state.ProcessRequest) { + f.defaultMinGasPriceAllowed = f.dbManager.GetDefaultMinGasPriceAllowed() + var err error if batch != nil { f.batch = batch @@ -250,15 +254,18 @@ func (f *finalizer) finalizeBatches(ctx context.Context) { log.Debug("finalizer init loop") for { start := now() + if f.batch.batchNumber == f.cfg.StopSequencerOnBatchNum { + f.halt(ctx, fmt.Errorf("finalizer reached stop sequencer batch number: %v", f.cfg.StopSequencerOnBatchNum)) + } + tx := f.worker.GetBestFittingTx(f.batch.remainingResources) metrics.WorkerProcessingTime(time.Since(start)) if tx != nil { - // Timestamp resolution - if f.batch.isEmpty() { - f.batch.timestamp = now() - } - log.Debugf("processing tx: %s", tx.Hash.Hex()) + + // reset the count of effective GasPrice process attempts (since the tx may have been tried to be processed before) + tx.EffectiveGasPriceProcessCount = 0 + f.sharedResourcesMux.Lock() for { _, err := f.processTransaction(ctx, tx) @@ -519,29 +526,29 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker) (errW if tx != nil { f.processRequest.Transactions = tx.RawTx hashStr = tx.HashStr - if tx.BreakEvenGasPrice.Uint64() == 0 { + + log.Infof("EffectiveGasPriceProcessCount=%d", tx.EffectiveGasPriceProcessCount) + // If it is the first time we process this tx then we calculate the BreakEvenGasPrice + if tx.EffectiveGasPriceProcessCount == 0 { + // Get L1 gas price and store in txTracker to make it consistent during the lifespan of the transaction + tx.L1GasPrice = f.dbManager.GetL1GasPrice() + log.Infof("tx.L1GasPrice=%d", tx.L1GasPrice) // Calculate the new breakEvenPrice - tx.BreakEvenGasPrice, err = f.dbManager.CalculateTxBreakEvenGasPrice(ctx, tx.BatchResources.Bytes, tx.BatchResources.ZKCounters.CumulativeGasUsed, tx.L1GasPRice) + tx.BreakEvenGasPrice, err = f.CalculateTxBreakEvenGasPrice(tx, tx.BatchResources.ZKCounters.CumulativeGasUsed) if err != nil { if 
f.effectiveGasPriceCfg.Enabled { return nil, err } else { - log.Warnf("failed to calculate break even gas price: %s", err) + log.Warnf("EffectiveGasPrice is disabled, but failed to calculate BreakEvenGasPrice: %s", err) } } } - var ( - effectivePercentage uint8 - err error - ) - - effectivePercentageAsDecodedHex := maxEffectivePercentageDecodedHex + effectivePercentage := state.MaxEffectivePercentage if tx.BreakEvenGasPrice.Uint64() != 0 { - // If the tx gas price is lower than the break even gas price, we set the effective percentage to 255 (100%) + // If the tx gas price is lower than the break even gas price, we process the tx with the user gas price (100%) if tx.GasPrice.Cmp(tx.BreakEvenGasPrice) <= 0 { - effectivePercentage = state.MaxEffectivePercentage tx.IsEffectiveGasPriceFinalExecution = true } else { effectivePercentage, err = CalculateEffectiveGasPricePercentage(tx.GasPrice, tx.BreakEvenGasPrice) @@ -550,16 +557,24 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker) (errW return nil, err } } + } + log.Infof("calculated breakEvenGasPrice: %d, gasPrice: %d, effectivePercentage: %d for tx: %s", tx.BreakEvenGasPrice, tx.GasPrice, effectivePercentage, tx.HashStr) - if f.effectiveGasPriceCfg.Enabled { - effectivePercentageAsDecodedHex, err = hex.DecodeHex(fmt.Sprintf("%x", effectivePercentage)) - if err != nil { - return nil, err - } - } - log.Infof("calculated effectivePercentage: %d for tx: %s", effectivePercentage, txHash) + // If EGP is disabled we use tx GasPrice (MaxEffectivePercentage=255) + if !f.effectiveGasPriceCfg.Enabled { + effectivePercentage = state.MaxEffectivePercentage + } + + var effectivePercentageAsDecodedHex []byte + effectivePercentageAsDecodedHex, err = hex.DecodeHex(fmt.Sprintf("%x", effectivePercentage)) + if err != nil { + return nil, err + } + + forkId := f.dbManager.GetForkIDByBatchNumber(f.processRequest.BatchNumber) + if forkId >= forkId5 { + f.processRequest.Transactions = 
append(f.processRequest.Transactions, effectivePercentageAsDecodedHex...) } - f.processRequest.Transactions = append(f.processRequest.Transactions, effectivePercentageAsDecodedHex...) } else { f.processRequest.Transactions = []byte{} } @@ -572,6 +587,19 @@ func (f *finalizer) processTransaction(ctx context.Context, tx *TxTracker) (errW } else if tx != nil && err == nil && !processBatchResponse.IsRomLevelError && len(processBatchResponse.Responses) == 0 { err = fmt.Errorf("executor returned no errors and no responses for tx: %s", tx.HashStr) f.halt(ctx, err) + } else if tx != nil && processBatchResponse.IsExecutorLevelError { + log.Errorf("error received from executor. Error: %v", err) + // Delete tx from the worker + f.worker.DeleteTx(tx.Hash, tx.From) + + // Set tx as invalid in the pool + errMsg := processBatchResponse.ExecutorError.Error() + err = f.dbManager.UpdateTxStatus(ctx, tx.Hash, pool.TxStatusInvalid, false, &errMsg) + if err != nil { + log.Errorf("failed to update status to invalid in the pool for tx: %s, err: %s", tx.Hash.String(), err) + } else { + metrics.TxProcessed(metrics.TxProcessedLabelInvalid, 1) + } return nil, err } @@ -608,66 +636,27 @@ func (f *finalizer) handleProcessTransactionResponse(ctx context.Context, tx *Tx } if f.effectiveGasPriceCfg.Enabled && !tx.IsEffectiveGasPriceFinalExecution { - // Increase nunber of executions related to gas price - tx.EffectiveGasPriceProcessCount++ - gasPrices, err := f.dbManager.GetGasPrices(ctx) + err := f.CompareTxBreakEvenGasPrice(ctx, tx, result.Responses[0].GasUsed) if err != nil { - log.Errorf("failed to get gas prices: %s", err) return nil, err } - tx.L1GasPRice = gasPrices.L1GasPrice - actualBreakEvenPrice, err := f.dbManager.CalculateTxBreakEvenGasPrice(ctx, tx.BatchResources.Bytes, result.Responses[0].GasUsed, tx.L1GasPRice) + } else if !f.effectiveGasPriceCfg.Enabled { + reprocessNeeded := false + newBreakEvenGasPrice, err := f.CalculateTxBreakEvenGasPrice(tx, result.Responses[0].GasUsed) if 
err != nil { - log.Errorf("failed to calculate breakEvenPrice with actual gasUsed: %s", err.Error()) - return nil, err - } - - // if actualBreakEvenPrice < tx.BrakeEvenGasPrice - if actualBreakEvenPrice.Cmp(tx.BreakEvenGasPrice) == -1 { - // Compute the difference - diff := new(big.Int).Sub(tx.BreakEvenGasPrice, actualBreakEvenPrice) - // Compute deviation of breakEvenPrice - deviation := new(big.Int).Div(new(big.Int).Mul(tx.BreakEvenGasPrice, f.maxBreakEvenGasPriceDeviationPercentage), big.NewInt(100)) //nolint:gomnd - - if diff.Cmp(deviation) == 1 { - if tx.EffectiveGasPriceProcessCount < 2 { //nolint:gomnd - tx.BreakEvenGasPrice = actualBreakEvenPrice - return nil, ErrEffectiveGasPriceReprocess - } else { - tx.BreakEvenGasPrice = tx.GasPrice - tx.IsEffectiveGasPriceFinalExecution = true - ev := &event.Event{ - ReceivedAt: time.Now(), - Source: event.Source_Node, - Component: event.Component_Sequencer, - Level: event.Level_Critical, - EventID: event.EventID_FinalizerBreakEvenGasPriceBigDifference, - Description: fmt.Sprintf("The difference: %s between the breakEvenPrice and the actualBreakEvenPrice is more than %d %%", diff.String(), f.effectiveGasPriceCfg.MaxBreakEvenGasPriceDeviationPercentage), - Json: struct { - transactionHash string - preExecutionBreakEvenGasPrice string - actualBreakEvenGasPrice string - diff string - deviation string - }{ - transactionHash: tx.Hash.String(), - preExecutionBreakEvenGasPrice: tx.BreakEvenGasPrice.String(), - actualBreakEvenGasPrice: actualBreakEvenPrice.String(), - diff: diff.String(), - deviation: deviation.String(), - }, - } - err = f.eventLog.LogEvent(ctx, ev) - if err != nil { - log.Errorf("failed to log event: %s", err.Error()) - } - return nil, ErrEffectiveGasPriceReprocess - } - } // TODO: Review this check regarding tx.GasPrice being nil - } else if tx.GasPrice != nil && actualBreakEvenPrice.Cmp(tx.GasPrice) == 1 { - tx.BreakEvenGasPrice = tx.GasPrice - tx.IsEffectiveGasPriceFinalExecution = true - return nil, 
ErrEffectiveGasPriceReprocess + log.Warnf("EffectiveGasPrice is disabled, but failed to calculate BreakEvenGasPrice: %s", err) + } else { + // Compute the absolute difference between tx.BreakEvenGasPrice - newBreakEvenGasPrice + diff := new(big.Int).Abs(new(big.Int).Sub(tx.BreakEvenGasPrice, newBreakEvenGasPrice)) + // Compute max difference allowed of breakEvenGasPrice + maxDiff := new(big.Int).Div(new(big.Int).Mul(tx.BreakEvenGasPrice, f.maxBreakEvenGasPriceDeviationPercentage), big.NewInt(100)) //nolint:gomnd + + // if diff is greater than the maxDiff allowed + if diff.Cmp(maxDiff) == 1 { + reprocessNeeded = true + } + log.Infof("calculated newBreakEvenGasPrice: %d, tx.BreakEvenGasprice: %d for tx: %s", newBreakEvenGasPrice, tx.BreakEvenGasPrice, tx.HashStr) + log.Infof("Would need reprocess: %t, diff: %d, maxDiff: %d", reprocessNeeded, diff, maxDiff) } } @@ -1081,7 +1070,8 @@ func (f *finalizer) reprocessFullBatch(ctx context.Context, batchNum uint64, exp Caller: stateMetrics.SequencerCallerLabel, } log.Infof("reprocessFullBatch: BatchNumber: %d, OldStateRoot: %s, Ger: %s", batch.BatchNumber, f.batch.initialStateRoot.String(), batch.GlobalExitRoot.String()) - txs, _, _, err := state.DecodeTxs(batch.BatchL2Data, f.cfg.ForkID) + forkID := f.dbManager.GetForkIDByBatchNumber(batchNum) + txs, _, _, err := state.DecodeTxs(batch.BatchL2Data, forkID) if err != nil { log.Errorf("reprocessFullBatch: error decoding BatchL2Data before reprocessing full batch: %d. 
Error: %v", batch.BatchNumber, err) diff --git a/sequencer/finalizer_test.go b/sequencer/finalizer_test.go index 69068fd34f..74751f453c 100644 --- a/sequencer/finalizer_test.go +++ b/sequencer/finalizer_test.go @@ -18,7 +18,6 @@ import ( stateMetrics "github.com/0xPolygonHermez/zkevm-node/state/metrics" "github.com/0xPolygonHermez/zkevm-node/state/runtime" "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" - "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" "github.com/0xPolygonHermez/zkevm-node/test/constants" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -53,7 +52,10 @@ var ( } effectiveGasPriceCfg = EffectiveGasPriceCfg{ MaxBreakEvenGasPriceDeviationPercentage: 10, - Enabled: true, + L1GasPriceFactor: 0.25, + ByteGasCost: 16, + MarginFactor: 1, + Enabled: false, } cfg = FinalizerCfg{ GERDeadlineTimeout: cfgTypes.Duration{ @@ -99,7 +101,9 @@ var ( testBatchL2DataAsString = "0xee80843b9aca00830186a0944d5cf5032b2a844602278b01199ed191a86c93ff88016345785d8a0000808203e980801186622d03b6b8da7cf111d1ccba5bb185c56deae6a322cebc6dda0556f3cb9700910c26408b64b51c5da36ba2f38ef55ba1cee719d5a6c012259687999074321bff" decodedBatchL2Data []byte done chan bool + gasPrice = big.NewInt(1000000) breakEvenGasPrice = big.NewInt(1000000) + l1GasPrice = uint64(1000000) ) func testNow() time.Time { @@ -130,9 +134,13 @@ func TestNewFinalizer(t *testing.T) { func TestFinalizer_handleProcessTransactionResponse(t *testing.T) { f = setupFinalizer(true) ctx = context.Background() - txTracker := &TxTracker{Hash: txHash, From: senderAddr, Nonce: 1, BreakEvenGasPrice: breakEvenGasPrice, BatchResources: state.BatchResources{ + txTracker := &TxTracker{Hash: txHash, From: senderAddr, Nonce: 1, GasPrice: gasPrice, BreakEvenGasPrice: breakEvenGasPrice, L1GasPrice: l1GasPrice, BatchResources: state.BatchResources{ Bytes: 1000, + ZKCounters: state.ZKCounters{ + CumulativeGasUsed: 500, + }, }} + txResponse := 
&state.ProcessTransactionResponse{ TxHash: txHash, StateRoot: newHash2, @@ -272,7 +280,6 @@ func TestFinalizer_handleProcessTransactionResponse(t *testing.T) { done <- true // signal that the goroutine is done }() } - if tc.expectedDeleteTxCall { workerMock.On("DeleteTx", txTracker.Hash, txTracker.From).Return().Once() } @@ -284,8 +291,7 @@ func TestFinalizer_handleProcessTransactionResponse(t *testing.T) { workerMock.On("UpdateTx", txTracker.Hash, txTracker.From, tc.executorResponse.UsedZkCounters).Return().Once() } if tc.expectedError == nil { - dbManagerMock.On("CalculateTxBreakEvenGasPrice", ctx, txTracker.BatchResources.Bytes, txResponse.GasUsed, uint64(0)).Return(breakEvenGasPrice, nilErr).Once() - dbManagerMock.On("GetGasPrices", ctx).Return(pool.GasPrices{L1GasPrice: 0, L2GasPrice: 0}, nilErr).Once() + //dbManagerMock.On("GetGasPrices", ctx).Return(pool.GasPrices{L1GasPrice: 0, L2GasPrice: 0}, nilErr).Once() workerMock.On("DeleteTx", txTracker.Hash, txTracker.From).Return().Once() workerMock.On("UpdateAfterSingleSuccessfulTxExecution", txTracker.From, tc.executorResponse.ReadWriteAddresses).Return([]*TxTracker{}).Once() } @@ -506,6 +512,7 @@ func TestFinalizer_newWIPBatch(t *testing.T) { if tc.stateRootAndLERErr == nil { dbManagerMock.On("CloseBatch", ctx, tc.closeBatchParams).Return(tc.closeBatchErr).Once() dbManagerMock.On("GetBatchByNumber", ctx, f.batch.batchNumber, nil).Return(tc.batches[0], nilErr).Once() + dbManagerMock.On("GetForkIDByBatchNumber", f.batch.batchNumber).Return(uint64(5)).Once() dbManagerMock.On("GetTransactionsByBatchNumber", ctx, f.batch.batchNumber).Return(currTxs, constants.EffectivePercentage, nilErr).Once() if tc.forcedBatches != nil && len(tc.forcedBatches) > 0 { processRequest := f.processRequest @@ -1322,7 +1329,7 @@ func TestFinalizer_handleTransactionError(t *testing.T) { txTracker := &TxTracker{Hash: txHash, From: senderAddr, Cost: big.NewInt(0)} testCases := []struct { name string - err pb.RomError + err executor.RomError 
expectedDeleteCall bool updateTxStatus pool.TxStatus expectedMoveCall bool @@ -1330,26 +1337,26 @@ func TestFinalizer_handleTransactionError(t *testing.T) { }{ { name: "Error OutOfCounters", - err: pb.RomError(executor.ROM_ERROR_OUT_OF_COUNTERS_STEP), + err: executor.RomError_ROM_ERROR_OUT_OF_COUNTERS_STEP, updateTxStatus: pool.TxStatusInvalid, expectedDeleteCall: true, isRoomOOC: true, }, { name: "Error IntrinsicInvalidNonce", - err: pb.RomError(executor.ROM_ERROR_INTRINSIC_INVALID_NONCE), + err: executor.RomError_ROM_ERROR_INTRINSIC_INVALID_NONCE, updateTxStatus: pool.TxStatusFailed, expectedMoveCall: true, }, { name: "Error IntrinsicInvalidBalance", - err: pb.RomError(executor.ROM_ERROR_INTRINSIC_INVALID_BALANCE), + err: executor.RomError_ROM_ERROR_INTRINSIC_INVALID_BALANCE, updateTxStatus: pool.TxStatusFailed, expectedMoveCall: true, }, { name: "Error IntrinsicErrorChainId", - err: pb.RomError(executor.ROM_ERROR_INTRINSIC_INVALID_CHAIN_ID), + err: executor.RomError_ROM_ERROR_INTRINSIC_INVALID_CHAIN_ID, updateTxStatus: pool.TxStatusFailed, expectedDeleteCall: true, }, @@ -1406,6 +1413,9 @@ func Test_processTransaction(t *testing.T) { GasPrice: breakEvenGasPrice, BatchResources: state.BatchResources{ Bytes: 1000, + ZKCounters: state.ZKCounters{ + CumulativeGasUsed: 500, + }, }, } successfulTxResponse := &state.ProcessTransactionResponse{ @@ -1496,9 +1506,11 @@ func Test_processTransaction(t *testing.T) { }() } + dbManagerMock.On("GetL1GasPrice").Return(uint64(1000000)).Once() executorMock.On("ProcessBatch", tc.ctx, mock.Anything, true).Return(tc.expectedResponse, tc.executorErr).Once() if tc.executorErr == nil { workerMock.On("DeleteTx", tc.tx.Hash, tc.tx.From).Return().Once() + dbManagerMock.On("GetForkIDByBatchNumber", mock.Anything).Return(forkId5) } if tc.expectedErr == nil { workerMock.On("UpdateAfterSingleSuccessfulTxExecution", tc.tx.From, tc.expectedResponse.ReadWriteAddresses).Return([]*TxTracker{}).Once() @@ -1953,6 +1965,9 @@ func 
TestFinalizer_reprocessFullBatch(t *testing.T) { // arrange f := setupFinalizer(true) dbManagerMock.On("GetBatchByNumber", context.Background(), tc.batchNum, nil).Return(tc.mockGetBatchByNumber, tc.mockGetBatchByNumberErr).Once() + if tc.name != "Error while getting batch by number" { + dbManagerMock.On("GetForkIDByBatchNumber", f.batch.batchNumber).Return(uint64(5)).Once() + } if tc.mockGetBatchByNumberErr == nil && tc.expectedDecodeErr == nil { executorMock.On("ProcessBatch", context.Background(), mock.Anything, false).Return(tc.expectedExecutorResponse, tc.expectedExecutorErr) } diff --git a/sequencer/interfaces.go b/sequencer/interfaces.go index 40f1c6f746..181326eaaf 100644 --- a/sequencer/interfaces.go +++ b/sequencer/interfaces.go @@ -10,7 +10,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/pool" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/state/metrics" - pb "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" + "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/jackc/pgx/v4" @@ -28,7 +28,8 @@ type txPool interface { GetTxZkCountersByHash(ctx context.Context, hash common.Hash) (*state.ZKCounters, error) UpdateTxWIPStatus(ctx context.Context, hash common.Hash, isWIP bool) error GetGasPrices(ctx context.Context) (pool.GasPrices, error) - CalculateTxBreakEvenGasPrice(ctx context.Context, txDataLength uint64, gasUsed uint64, l1GasPrice uint64) (*big.Int, error) + GetDefaultMinGasPriceAllowed() uint64 + GetL1GasPrice() uint64 } // etherman contains the methods required to interact with ethereum. 
@@ -58,7 +59,7 @@ type stateInterface interface { GetLastStateRoot(ctx context.Context, dbTx pgx.Tx) (common.Hash, error) ProcessBatch(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error) CloseBatch(ctx context.Context, receipt state.ProcessingReceipt, dbTx pgx.Tx) error - ExecuteBatch(ctx context.Context, batch state.Batch, updateMerkleTree bool, dbTx pgx.Tx) (*pb.ProcessBatchResponse, error) + ExecuteBatch(ctx context.Context, batch state.Batch, updateMerkleTree bool, dbTx pgx.Tx) (*executor.ProcessBatchResponse, error) GetForcedBatch(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (*state.ForcedBatch, error) GetLastBatch(ctx context.Context, dbTx pgx.Tx) (*state.Batch, error) GetLastBatchNumber(ctx context.Context, dbTx pgx.Tx) (uint64, error) @@ -79,13 +80,14 @@ type stateInterface interface { GetLatestGer(ctx context.Context, maxBlockNumber uint64) (state.GlobalExitRoot, time.Time, error) FlushMerkleTree(ctx context.Context) error GetStoredFlushID(ctx context.Context) (uint64, string, error) + GetForkIDByBatchNumber(batchNumber uint64) uint64 } type workerInterface interface { GetBestFittingTx(resources state.BatchResources) *TxTracker UpdateAfterSingleSuccessfulTxExecution(from common.Address, touchedAddresses map[common.Address]*state.InfoReadWrite) []*TxTracker UpdateTx(txHash common.Hash, from common.Address, ZKCounters state.ZKCounters) - AddTxTracker(ctx context.Context, txTracker *TxTracker) (dropReason error, isWIP bool) + AddTxTracker(ctx context.Context, txTracker *TxTracker) (replacedTx *TxTracker, dropReason error) MoveTxToNotReady(txHash common.Hash, from common.Address, actualNonce *uint64, actualBalance *big.Int) []*TxTracker DeleteTx(txHash common.Hash, from common.Address) HandleL2Reorg(txHashes []common.Hash) @@ -120,10 +122,12 @@ type dbManagerInterface interface { CountReorgs(ctx context.Context, dbTx pgx.Tx) (uint64, error) FlushMerkleTree(ctx context.Context) error 
GetGasPrices(ctx context.Context) (pool.GasPrices, error) - CalculateTxBreakEvenGasPrice(ctx context.Context, txDataLength uint64, gasUsed uint64, l1GasPrice uint64) (*big.Int, error) + GetDefaultMinGasPriceAllowed() uint64 + GetL1GasPrice() uint64 GetStoredFlushID(ctx context.Context) (uint64, string, error) StoreProcessedTxAndDeleteFromPool(ctx context.Context, tx transactionToStore) error GetForcedBatch(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (*state.ForcedBatch, error) + GetForkIDByBatchNumber(batchNumber uint64) uint64 } type ethTxManager interface { diff --git a/sequencer/mock_db_manager.go b/sequencer/mock_db_manager.go index 52e9cba5e7..c2a12fc589 100644 --- a/sequencer/mock_db_manager.go +++ b/sequencer/mock_db_manager.go @@ -52,32 +52,6 @@ func (_m *DbManagerMock) BeginStateTransaction(ctx context.Context) (pgx.Tx, err return r0, r1 } -// CalculateTxBreakEvenGasPrice provides a mock function with given fields: ctx, txDataLength, gasUsed, l1GasPrice -func (_m *DbManagerMock) CalculateTxBreakEvenGasPrice(ctx context.Context, txDataLength uint64, gasUsed uint64, l1GasPrice uint64) (*big.Int, error) { - ret := _m.Called(ctx, txDataLength, gasUsed, l1GasPrice) - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, uint64) (*big.Int, error)); ok { - return rf(ctx, txDataLength, gasUsed, l1GasPrice) - } - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, uint64) *big.Int); ok { - r0 = rf(ctx, txDataLength, gasUsed, l1GasPrice) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, uint64) error); ok { - r1 = rf(ctx, txDataLength, gasUsed, l1GasPrice) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // CloseBatch provides a mock function with given fields: ctx, params func (_m *DbManagerMock) CloseBatch(ctx context.Context, params ClosingBatchParameters) error { ret := _m.Called(ctx, params) @@ 
-210,6 +184,20 @@ func (_m *DbManagerMock) GetBatchByNumber(ctx context.Context, batchNumber uint6 return r0, r1 } +// GetDefaultMinGasPriceAllowed provides a mock function with given fields: +func (_m *DbManagerMock) GetDefaultMinGasPriceAllowed() uint64 { + ret := _m.Called() + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + // GetForcedBatch provides a mock function with given fields: ctx, forcedBatchNumber, dbTx func (_m *DbManagerMock) GetForcedBatch(ctx context.Context, forcedBatchNumber uint64, dbTx pgx.Tx) (*state.ForcedBatch, error) { ret := _m.Called(ctx, forcedBatchNumber, dbTx) @@ -262,6 +250,20 @@ func (_m *DbManagerMock) GetForcedBatchesSince(ctx context.Context, forcedBatchN return r0, r1 } +// GetForkIDByBatchNumber provides a mock function with given fields: batchNumber +func (_m *DbManagerMock) GetForkIDByBatchNumber(batchNumber uint64) uint64 { + ret := _m.Called(batchNumber) + + var r0 uint64 + if rf, ok := ret.Get(0).(func(uint64) uint64); ok { + r0 = rf(batchNumber) + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + // GetGasPrices provides a mock function with given fields: ctx func (_m *DbManagerMock) GetGasPrices(ctx context.Context) (pool.GasPrices, error) { ret := _m.Called(ctx) @@ -286,6 +288,20 @@ func (_m *DbManagerMock) GetGasPrices(ctx context.Context) (pool.GasPrices, erro return r0, r1 } +// GetL1GasPrice provides a mock function with given fields: +func (_m *DbManagerMock) GetL1GasPrice() uint64 { + ret := _m.Called() + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + // GetLastBatch provides a mock function with given fields: ctx func (_m *DbManagerMock) GetLastBatch(ctx context.Context) (*state.Batch, error) { ret := _m.Called(ctx) diff --git a/sequencer/mock_pool.go b/sequencer/mock_pool.go index 7e43a0a5db..40723aca09 100644 --- 
a/sequencer/mock_pool.go +++ b/sequencer/mock_pool.go @@ -4,7 +4,6 @@ package sequencer import ( context "context" - big "math/big" common "github.com/ethereum/go-ethereum/common" @@ -20,32 +19,6 @@ type PoolMock struct { mock.Mock } -// CalculateTxBreakEvenGasPrice provides a mock function with given fields: ctx, txDataLength, gasUsed, l1GasPrice -func (_m *PoolMock) CalculateTxBreakEvenGasPrice(ctx context.Context, txDataLength uint64, gasUsed uint64, l1GasPrice uint64) (*big.Int, error) { - ret := _m.Called(ctx, txDataLength, gasUsed, l1GasPrice) - - var r0 *big.Int - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, uint64) (*big.Int, error)); ok { - return rf(ctx, txDataLength, gasUsed, l1GasPrice) - } - if rf, ok := ret.Get(0).(func(context.Context, uint64, uint64, uint64) *big.Int); ok { - r0 = rf(ctx, txDataLength, gasUsed, l1GasPrice) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*big.Int) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, uint64, uint64, uint64) error); ok { - r1 = rf(ctx, txDataLength, gasUsed, l1GasPrice) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - // DeleteTransactionByHash provides a mock function with given fields: ctx, hash func (_m *PoolMock) DeleteTransactionByHash(ctx context.Context, hash common.Hash) error { ret := _m.Called(ctx, hash) @@ -74,6 +47,20 @@ func (_m *PoolMock) DeleteTransactionsByHashes(ctx context.Context, hashes []com return r0 } +// GetDefaultMinGasPriceAllowed provides a mock function with given fields: +func (_m *PoolMock) GetDefaultMinGasPriceAllowed() uint64 { + ret := _m.Called() + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + // GetGasPrices provides a mock function with given fields: ctx func (_m *PoolMock) GetGasPrices(ctx context.Context) (pool.GasPrices, error) { ret := _m.Called(ctx) @@ -98,6 +85,20 @@ func (_m *PoolMock) GetGasPrices(ctx 
context.Context) (pool.GasPrices, error) { return r0, r1 } +// GetL1GasPrice provides a mock function with given fields: +func (_m *PoolMock) GetL1GasPrice() uint64 { + ret := _m.Called() + + var r0 uint64 + if rf, ok := ret.Get(0).(func() uint64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + // GetNonWIPPendingTxs provides a mock function with given fields: ctx func (_m *PoolMock) GetNonWIPPendingTxs(ctx context.Context) ([]pool.Transaction, error) { ret := _m.Called(ctx) diff --git a/sequencer/mock_state.go b/sequencer/mock_state.go index 8bf7261b8d..4590f7fb87 100644 --- a/sequencer/mock_state.go +++ b/sequencer/mock_state.go @@ -8,12 +8,12 @@ import ( common "github.com/ethereum/go-ethereum/common" + executor "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" + metrics "github.com/0xPolygonHermez/zkevm-node/state/metrics" mock "github.com/stretchr/testify/mock" - pb "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" - pgx "github.com/jackc/pgx/v4" state "github.com/0xPolygonHermez/zkevm-node/state" @@ -119,19 +119,19 @@ func (_m *StateMock) CountReorgs(ctx context.Context, dbTx pgx.Tx) (uint64, erro } // ExecuteBatch provides a mock function with given fields: ctx, batch, updateMerkleTree, dbTx -func (_m *StateMock) ExecuteBatch(ctx context.Context, batch state.Batch, updateMerkleTree bool, dbTx pgx.Tx) (*pb.ProcessBatchResponse, error) { +func (_m *StateMock) ExecuteBatch(ctx context.Context, batch state.Batch, updateMerkleTree bool, dbTx pgx.Tx) (*executor.ProcessBatchResponse, error) { ret := _m.Called(ctx, batch, updateMerkleTree, dbTx) - var r0 *pb.ProcessBatchResponse + var r0 *executor.ProcessBatchResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, state.Batch, bool, pgx.Tx) (*pb.ProcessBatchResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, state.Batch, bool, pgx.Tx) (*executor.ProcessBatchResponse, error)); ok { return rf(ctx, batch, updateMerkleTree, 
dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, state.Batch, bool, pgx.Tx) *pb.ProcessBatchResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, state.Batch, bool, pgx.Tx) *executor.ProcessBatchResponse); ok { r0 = rf(ctx, batch, updateMerkleTree, dbTx) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*pb.ProcessBatchResponse) + r0 = ret.Get(0).(*executor.ProcessBatchResponse) } } @@ -262,6 +262,20 @@ func (_m *StateMock) GetForcedBatchesSince(ctx context.Context, forcedBatchNumbe return r0, r1 } +// GetForkIDByBatchNumber provides a mock function with given fields: batchNumber +func (_m *StateMock) GetForkIDByBatchNumber(batchNumber uint64) uint64 { + ret := _m.Called(batchNumber) + + var r0 uint64 + if rf, ok := ret.Get(0).(func(uint64) uint64); ok { + r0 = rf(batchNumber) + } else { + r0 = ret.Get(0).(uint64) + } + + return r0 +} + // GetLastBatch provides a mock function with given fields: ctx, dbTx func (_m *StateMock) GetLastBatch(ctx context.Context, dbTx pgx.Tx) (*state.Batch, error) { ret := _m.Called(ctx, dbTx) diff --git a/sequencer/mock_worker.go b/sequencer/mock_worker.go index 61fcfd0ad7..1262c09023 100644 --- a/sequencer/mock_worker.go +++ b/sequencer/mock_worker.go @@ -21,24 +21,26 @@ type WorkerMock struct { } // AddTxTracker provides a mock function with given fields: ctx, txTracker -func (_m *WorkerMock) AddTxTracker(ctx context.Context, txTracker *TxTracker) (error, bool) { +func (_m *WorkerMock) AddTxTracker(ctx context.Context, txTracker *TxTracker) (*TxTracker, error) { ret := _m.Called(ctx, txTracker) - var r0 error - var r1 bool - if rf, ok := ret.Get(0).(func(context.Context, *TxTracker) (error, bool)); ok { + var r0 *TxTracker + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *TxTracker) (*TxTracker, error)); ok { return rf(ctx, txTracker) } - if rf, ok := ret.Get(0).(func(context.Context, *TxTracker) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, *TxTracker) *TxTracker); ok { r0 = 
rf(ctx, txTracker) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*TxTracker) + } } - if rf, ok := ret.Get(1).(func(context.Context, *TxTracker) bool); ok { + if rf, ok := ret.Get(1).(func(context.Context, *TxTracker) error); ok { r1 = rf(ctx, txTracker) } else { - r1 = ret.Get(1).(bool) + r1 = ret.Error(1) } return r0, r1 diff --git a/sequencer/txtracker.go b/sequencer/txtracker.go index 0071fcb90a..bfc67f5b7b 100644 --- a/sequencer/txtracker.go +++ b/sequencer/txtracker.go @@ -35,7 +35,7 @@ type TxTracker struct { GasPriceEffectivePercentage uint8 EffectiveGasPriceProcessCount uint8 IsEffectiveGasPriceFinalExecution bool - L1GasPRice uint64 + L1GasPrice uint64 } // batchResourceWeightMultipliers is a struct that contains the weight multipliers for each resource diff --git a/sequencer/worker.go b/sequencer/worker.go index 4054720be6..cba6938f7c 100644 --- a/sequencer/worker.go +++ b/sequencer/worker.go @@ -45,7 +45,7 @@ func (w *Worker) NewTxTracker(tx types.Transaction, counters state.ZKCounters, i } // AddTxTracker adds a new Tx to the Worker -func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (dropReason error, isWIP bool) { +func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (replacedTx *TxTracker, dropReason error) { w.workerMutex.Lock() defer w.workerMutex.Unlock() @@ -59,19 +59,19 @@ func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (dropReason er if err != nil { dropReason = fmt.Errorf("AddTx GetLastStateRoot error: %v", err) log.Error(dropReason) - return dropReason, false + return nil, dropReason } nonce, err := w.state.GetNonceByStateRoot(ctx, tx.From, root) if err != nil { dropReason = fmt.Errorf("AddTx GetNonceByStateRoot error: %v", err) log.Error(dropReason) - return dropReason, false + return nil, dropReason } balance, err := w.state.GetBalanceByStateRoot(ctx, tx.From, root) if err != nil { dropReason = fmt.Errorf("AddTx GetBalanceByStateRoot error: %v", err) 
log.Error(dropReason) - return dropReason, false + return nil, dropReason } addr = newAddrQueue(tx.From, nonce.Uint64(), balance) @@ -84,25 +84,29 @@ func (w *Worker) AddTxTracker(ctx context.Context, tx *TxTracker) (dropReason er } // Add the txTracker to Addr and get the newReadyTx and prevReadyTx - log.Infof("AddTx new tx(%s) nonce(%d) cost(%s) to addrQueue(%s)", tx.Hash.String(), tx.Nonce, tx.Cost.String(), tx.FromStr) - var newReadyTx, prevReadyTx *TxTracker - newReadyTx, prevReadyTx, dropReason = addr.addTx(tx) + log.Infof("AddTx new tx(%s) nonce(%d) cost(%s) to addrQueue(%s) nonce(%d) balance(%d)", tx.HashStr, tx.Nonce, tx.Cost.String(), addr.fromStr, addr.currentNonce, addr.currentBalance) + var newReadyTx, prevReadyTx, repTx *TxTracker + newReadyTx, prevReadyTx, repTx, dropReason = addr.addTx(tx) if dropReason != nil { - log.Infof("AddTx tx(%s) dropped from addrQueue(%s)", tx.Hash.String(), tx.FromStr) - return dropReason, false + log.Infof("AddTx tx(%s) dropped from addrQueue(%s), reason: %s", tx.HashStr, tx.FromStr, dropReason.Error()) + return repTx, dropReason } // Update the EfficiencyList (if needed) if prevReadyTx != nil { - log.Infof("AddTx prevReadyTx(%s) nonce(%d) cost(%s) deleted from EfficiencyList", prevReadyTx.Hash.String(), prevReadyTx.Nonce, prevReadyTx.Cost.String()) + log.Infof("AddTx prevReadyTx(%s) nonce(%d) cost(%s) deleted from EfficiencyList", prevReadyTx.HashStr, prevReadyTx.Nonce, prevReadyTx.Cost.String()) w.efficiencyList.delete(prevReadyTx) } if newReadyTx != nil { - log.Infof("AddTx newReadyTx(%s) nonce(%d) cost(%s) added to EfficiencyList", newReadyTx.Hash.String(), newReadyTx.Nonce, newReadyTx.Cost.String()) + log.Infof("AddTx newReadyTx(%s) nonce(%d) cost(%s) added to EfficiencyList", newReadyTx.HashStr, newReadyTx.Nonce, newReadyTx.Cost.String()) w.efficiencyList.add(newReadyTx) } - return nil, true + if repTx != nil { + log.Infof("AddTx replacedTx(%s) nonce(%d) cost(%s) has been replaced", repTx.HashStr, repTx.Nonce, 
repTx.Cost.String()) + } + + return repTx, nil } func (w *Worker) applyAddressUpdate(from common.Address, fromNonce *uint64, fromBalance *big.Int) (*TxTracker, *TxTracker, []*TxTracker) { diff --git a/sequencer/worker_test.go b/sequencer/worker_test.go index 0d6c0a5ff8..51787f188c 100644 --- a/sequencer/worker_test.go +++ b/sequencer/worker_test.go @@ -61,7 +61,7 @@ func processWorkerAddTxTestCases(t *testing.T, worker *Worker, testCases []worke tx.updateZKCounters(testCase.counters, worker.batchConstraints, worker.batchResourceWeights) t.Logf("%s=%s", testCase.name, fmt.Sprintf("%.2f", tx.Efficiency)) - err, _ := worker.AddTxTracker(ctx, &tx) + _, err := worker.AddTxTracker(ctx, &tx) if err != nil { return } diff --git a/state/batch.go b/state/batch.go index f3c5a07f79..e1a97d728c 100644 --- a/state/batch.go +++ b/state/batch.go @@ -10,15 +10,16 @@ import ( "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state/metrics" "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" - "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/jackc/pgx/v4" ) const ( - cTrue = 1 - cFalse = 0 + cTrue = 1 + cFalse = 0 + noFlushID uint64 = 0 + noProverID string = "" ) // Batch struct @@ -179,7 +180,7 @@ func (s *State) ProcessBatch(ctx context.Context, request ProcessRequest, update forkID := s.GetForkIDByBatchNumber(request.BatchNumber) // Create Batch - var processBatchRequest = &pb.ProcessBatchRequest{ + var processBatchRequest = &executor.ProcessBatchRequest{ OldBatchNum: request.BatchNumber - 1, Coinbase: request.Coinbase.String(), BatchL2Data: request.Transactions, @@ -216,7 +217,7 @@ func (s *State) ProcessBatch(ctx context.Context, request ProcessRequest, update // ExecuteBatch is used by the synchronizer to reprocess batches to compare generated state root vs stored one // It is also used by the sequencer in order to 
calculate used zkCounter of a WIPBatch -func (s *State) ExecuteBatch(ctx context.Context, batch Batch, updateMerkleTree bool, dbTx pgx.Tx) (*pb.ProcessBatchResponse, error) { +func (s *State) ExecuteBatch(ctx context.Context, batch Batch, updateMerkleTree bool, dbTx pgx.Tx) (*executor.ProcessBatchResponse, error) { if dbTx == nil { return nil, ErrDBTxNil } @@ -235,7 +236,7 @@ func (s *State) ExecuteBatch(ctx context.Context, batch Batch, updateMerkleTree } // Create Batch - processBatchRequest := &pb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ OldBatchNum: batch.BatchNumber - 1, Coinbase: batch.Coinbase.String(), BatchL2Data: batch.BatchL2Data, @@ -266,7 +267,7 @@ func (s *State) ExecuteBatch(ctx context.Context, batch Batch, updateMerkleTree if err != nil { log.Error("error executing batch: ", err) return nil, err - } else if processBatchResponse != nil && processBatchResponse.Error != executor.EXECUTOR_ERROR_NO_ERROR { + } else if processBatchResponse != nil && processBatchResponse.Error != executor.ExecutorError_EXECUTOR_ERROR_NO_ERROR { err = executor.ExecutorErr(processBatchResponse.Error) s.eventLog.LogExecutorError(ctx, processBatchResponse.Error, processBatchRequest) } @@ -280,7 +281,7 @@ func uint32ToBool(value uint32) bool { } */ -func (s *State) processBatch(ctx context.Context, batchNumber uint64, batchL2Data []byte, caller metrics.CallerLabel, dbTx pgx.Tx) (*pb.ProcessBatchResponse, error) { +func (s *State) processBatch(ctx context.Context, batchNumber uint64, batchL2Data []byte, caller metrics.CallerLabel, dbTx pgx.Tx) (*executor.ProcessBatchResponse, error) { if dbTx == nil { return nil, ErrDBTxNil } @@ -317,7 +318,7 @@ func (s *State) processBatch(ctx context.Context, batchNumber uint64, batchL2Dat forkID := s.GetForkIDByBatchNumber(lastBatch.BatchNumber) // Create Batch - processBatchRequest := &pb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ OldBatchNum: lastBatch.BatchNumber - 1, 
Coinbase: lastBatch.Coinbase.String(), BatchL2Data: batchL2Data, @@ -333,7 +334,7 @@ func (s *State) processBatch(ctx context.Context, batchNumber uint64, batchL2Dat return s.sendBatchRequestToExecutor(ctx, processBatchRequest, caller) } -func (s *State) sendBatchRequestToExecutor(ctx context.Context, processBatchRequest *pb.ProcessBatchRequest, caller metrics.CallerLabel) (*pb.ProcessBatchResponse, error) { +func (s *State) sendBatchRequestToExecutor(ctx context.Context, processBatchRequest *executor.ProcessBatchRequest, caller metrics.CallerLabel) (*executor.ProcessBatchResponse, error) { if s.executorClient == nil { return nil, ErrExecutorNil } @@ -357,7 +358,7 @@ func (s *State) sendBatchRequestToExecutor(ctx context.Context, processBatchRequ log.Errorf("Error s.executorClient.ProcessBatch: %v", err) log.Errorf("Error s.executorClient.ProcessBatch: %s", err.Error()) log.Errorf("Error s.executorClient.ProcessBatch response: %v", res) - } else if res.Error != executor.EXECUTOR_ERROR_NO_ERROR { + } else if res.Error != executor.ExecutorError_EXECUTOR_ERROR_NO_ERROR { err = executor.ExecutorErr(res.Error) s.eventLog.LogExecutorError(ctx, res.Error, processBatchRequest) } @@ -405,26 +406,28 @@ func (s *State) CloseBatch(ctx context.Context, receipt ProcessingReceipt, dbTx return s.PostgresStorage.closeBatch(ctx, receipt, dbTx) } -// ProcessAndStoreClosedBatch is used by the Synchronizer to add a closed batch into the data base -func (s *State) ProcessAndStoreClosedBatch(ctx context.Context, processingCtx ProcessingContext, encodedTxs []byte, dbTx pgx.Tx, caller metrics.CallerLabel) (common.Hash, error) { +// ProcessAndStoreClosedBatch is used by the Synchronizer to add a closed batch into the data base. Values returned are the new stateRoot, +// the flushID (incremental value returned by executor), +// the ProverID (executor running ID) the result of closing the batch. 
+func (s *State) ProcessAndStoreClosedBatch(ctx context.Context, processingCtx ProcessingContext, encodedTxs []byte, dbTx pgx.Tx, caller metrics.CallerLabel) (common.Hash, uint64, string, error) { // Decode transactions forkID := s.GetForkIDByBatchNumber(processingCtx.BatchNumber) decodedTransactions, _, _, err := DecodeTxs(encodedTxs, forkID) if err != nil && !errors.Is(err, ErrInvalidData) { log.Debugf("error decoding transactions: %v", err) - return common.Hash{}, err + return common.Hash{}, noFlushID, noProverID, err } // Open the batch and process the txs if dbTx == nil { - return common.Hash{}, ErrDBTxNil + return common.Hash{}, noFlushID, noProverID, ErrDBTxNil } if err := s.OpenBatch(ctx, processingCtx, dbTx); err != nil { - return common.Hash{}, err + return common.Hash{}, noFlushID, noProverID, err } processed, err := s.processBatch(ctx, processingCtx.BatchNumber, encodedTxs, caller, dbTx) if err != nil { - return common.Hash{}, err + return common.Hash{}, noFlushID, noProverID, err } // Sanity check @@ -437,7 +440,7 @@ func (s *State) ProcessAndStoreClosedBatch(ctx context.Context, processingCtx Pr for i := 0; i < len(processed.Responses); i++ { if !IsStateRootChanged(processed.Responses[i].Error) { if executor.IsROMOutOfCountersError(processed.Responses[i].Error) { - processed.Responses = []*pb.ProcessTransactionResponse{} + processed.Responses = []*executor.ProcessTransactionResponse{} break } @@ -455,19 +458,19 @@ func (s *State) ProcessAndStoreClosedBatch(ctx context.Context, processingCtx Pr processedBatch, err := s.convertToProcessBatchResponse(decodedTransactions, processed) if err != nil { - return common.Hash{}, err + return common.Hash{}, noFlushID, noProverID, err } if len(processedBatch.Responses) > 0 { // Store processed txs into the batch err = s.StoreTransactions(ctx, processingCtx.BatchNumber, processedBatch.Responses, dbTx) if err != nil { - return common.Hash{}, err + return common.Hash{}, noFlushID, noProverID, err } } // Close batch - 
return common.BytesToHash(processed.NewStateRoot), s.closeBatch(ctx, ProcessingReceipt{ + return common.BytesToHash(processed.NewStateRoot), processed.FlushId, processed.ProverId, s.closeBatch(ctx, ProcessingReceipt{ BatchNumber: processingCtx.BatchNumber, StateRoot: processedBatch.NewStateRoot, LocalExitRoot: processedBatch.NewLocalExitRoot, diff --git a/state/converters.go b/state/converters.go index bbadf022f0..9881d78567 100644 --- a/state/converters.go +++ b/state/converters.go @@ -11,7 +11,6 @@ import ( "github.com/0xPolygonHermez/zkevm-node/hex" "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" - "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" "github.com/0xPolygonHermez/zkevm-node/state/runtime/fakevm" "github.com/0xPolygonHermez/zkevm-node/state/runtime/instrumentation" "github.com/ethereum/go-ethereum/common" @@ -19,7 +18,7 @@ import ( ) // ConvertToCounters extracts ZKCounters from a ProcessBatchResponse -func ConvertToCounters(resp *pb.ProcessBatchResponse) ZKCounters { +func ConvertToCounters(resp *executor.ProcessBatchResponse) ZKCounters { return ZKCounters{ CumulativeGasUsed: resp.CumulativeGasUsed, UsedKeccakHashes: resp.CntKeccakHashes, @@ -33,11 +32,11 @@ func ConvertToCounters(resp *pb.ProcessBatchResponse) ZKCounters { } // TestConvertToProcessBatchResponse for test purposes -func (s *State) TestConvertToProcessBatchResponse(txs []types.Transaction, response *pb.ProcessBatchResponse) (*ProcessBatchResponse, error) { +func (s *State) TestConvertToProcessBatchResponse(txs []types.Transaction, response *executor.ProcessBatchResponse) (*ProcessBatchResponse, error) { return s.convertToProcessBatchResponse(txs, response) } -func (s *State) convertToProcessBatchResponse(txs []types.Transaction, response *pb.ProcessBatchResponse) (*ProcessBatchResponse, error) { +func (s *State) convertToProcessBatchResponse(txs []types.Transaction, response *executor.ProcessBatchResponse) 
(*ProcessBatchResponse, error) { responses, err := s.convertToProcessTransactionResponse(txs, response.Responses) if err != nil { return nil, err @@ -48,13 +47,13 @@ func (s *State) convertToProcessBatchResponse(txs []types.Transaction, response return nil, err } - isExecutorLevelError := (response.Error != executor.EXECUTOR_ERROR_NO_ERROR) + isExecutorLevelError := (response.Error != executor.ExecutorError_EXECUTOR_ERROR_NO_ERROR) isRomLevelError := false isRomOOCError := false if response.Responses != nil { for _, resp := range response.Responses { - if resp.Error != pb.RomError_ROM_ERROR_NO_ERROR { + if resp.Error != executor.RomError_ROM_ERROR_NO_ERROR { isRomLevelError = true break } @@ -86,11 +85,11 @@ func (s *State) convertToProcessBatchResponse(txs []types.Transaction, response } // IsStateRootChanged returns true if the transaction changes the state root -func IsStateRootChanged(err pb.RomError) bool { +func IsStateRootChanged(err executor.RomError) bool { return !executor.IsIntrinsicError(err) && !executor.IsROMOutOfCountersError(err) } -func convertToReadWriteAddresses(addresses map[string]*pb.InfoReadWrite) (map[common.Address]*InfoReadWrite, error) { +func convertToReadWriteAddresses(addresses map[string]*executor.InfoReadWrite) (map[common.Address]*InfoReadWrite, error) { results := make(map[common.Address]*InfoReadWrite, len(addresses)) for addr, addrInfo := range addresses { @@ -124,7 +123,7 @@ func convertToReadWriteAddresses(addresses map[string]*pb.InfoReadWrite) (map[co return results, nil } -func (s *State) convertToProcessTransactionResponse(txs []types.Transaction, responses []*pb.ProcessTransactionResponse) ([]*ProcessTransactionResponse, error) { +func (s *State) convertToProcessTransactionResponse(txs []types.Transaction, responses []*executor.ProcessTransactionResponse) ([]*ProcessTransactionResponse, error) { results := make([]*ProcessTransactionResponse, 0, len(responses)) for i, response := range responses { trace, err := 
convertToStructLogArray(response.ExecutionTrace) @@ -190,7 +189,7 @@ func (s *State) convertToProcessTransactionResponse(txs []types.Transaction, res return results, nil } -func convertToLog(protoLogs []*pb.Log) []*types.Log { +func convertToLog(protoLogs []*executor.Log) []*types.Log { logs := make([]*types.Log, 0, len(protoLogs)) for _, protoLog := range protoLogs { @@ -198,10 +197,8 @@ func convertToLog(protoLogs []*pb.Log) []*types.Log { log.Address = common.HexToAddress(protoLog.Address) log.Topics = convertToTopics(protoLog.Topics) log.Data = protoLog.Data - log.BlockNumber = protoLog.BatchNumber log.TxHash = common.BytesToHash(protoLog.TxHash) log.TxIndex = uint(protoLog.TxIndex) - log.BlockHash = common.BytesToHash(protoLog.BatchHash) log.Index = uint(protoLog.Index) logs = append(logs, log) } @@ -218,7 +215,7 @@ func convertToTopics(responses [][]byte) []common.Hash { return results } -func convertToStructLogArray(responses []*pb.ExecutionTraceStep) (*[]instrumentation.StructLog, error) { +func convertToStructLogArray(responses []*executor.ExecutionTraceStep) (*[]instrumentation.StructLog, error) { results := make([]instrumentation.StructLog, 0, len(responses)) for _, response := range responses { @@ -271,7 +268,7 @@ func convertToProperMap(responses map[string]string) map[common.Hash]common.Hash return results } -func convertToExecutorTrace(callTrace *pb.CallTrace) (*instrumentation.ExecutorTrace, error) { +func convertToExecutorTrace(callTrace *executor.CallTrace) (*instrumentation.ExecutorTrace, error) { trace := new(instrumentation.ExecutorTrace) if callTrace != nil { trace.Context = convertToContext(callTrace.Context) @@ -285,7 +282,7 @@ func convertToExecutorTrace(callTrace *pb.CallTrace) (*instrumentation.ExecutorT return trace, nil } -func convertToContext(context *pb.TransactionContext) instrumentation.Context { +func convertToContext(context *executor.TransactionContext) instrumentation.Context { return instrumentation.Context{ Type: 
context.Type, From: context.From, @@ -301,7 +298,7 @@ func convertToContext(context *pb.TransactionContext) instrumentation.Context { } } -func convertToInstrumentationSteps(responses []*pb.TransactionStep) ([]instrumentation.Step, error) { +func convertToInstrumentationSteps(responses []*executor.TransactionStep) ([]instrumentation.Step, error) { results := make([]instrumentation.Step, 0, len(responses)) for _, response := range responses { step := new(instrumentation.Step) @@ -341,7 +338,7 @@ func convertToInstrumentationSteps(responses []*pb.TransactionStep) ([]instrumen return results, nil } -func convertToInstrumentationContract(response *pb.Contract) instrumentation.Contract { +func convertToInstrumentationContract(response *executor.Contract) instrumentation.Contract { return instrumentation.Contract{ Address: common.HexToAddress(response.Address), Caller: common.HexToAddress(response.Caller), @@ -351,7 +348,7 @@ func convertToInstrumentationContract(response *pb.Contract) instrumentation.Con } } -func convertToCounters(resp *pb.ProcessBatchResponse) ZKCounters { +func convertToCounters(resp *executor.ProcessBatchResponse) ZKCounters { return ZKCounters{ CumulativeGasUsed: resp.CumulativeGasUsed, UsedKeccakHashes: resp.CntKeccakHashes, diff --git a/state/genesis.go b/state/genesis.go index 984de56e33..e48a89a3b6 100644 --- a/state/genesis.go +++ b/state/genesis.go @@ -17,10 +17,12 @@ import ( // Genesis contains the information to populate state on creation type Genesis struct { - // GenesisBlockNum is the block number where the polygonZKEVM smc was deployed + // GenesisBlockNum is the block number where the polygonZKEVM smc was deployed on L1 GenesisBlockNum uint64 - Root common.Hash - GenesisActions []*GenesisAction + // Root hash of the genesis block + Root common.Hash + // Contracts to be deployed to L2 + GenesisActions []*GenesisAction } // GenesisAction represents one of the values set on the SMT during genesis. 
diff --git a/state/helper.go b/state/helper.go index bdd9c64bbd..b009047555 100644 --- a/state/helper.go +++ b/state/helper.go @@ -3,6 +3,7 @@ package state import ( "fmt" "math/big" + "sort" "strconv" "github.com/0xPolygonHermez/zkevm-node/hex" @@ -117,7 +118,7 @@ func EncodeTransaction(tx types.Transaction, effectivePercentage uint8, forkID u } // EncodeUnsignedTransaction RLP encodes the given unsigned transaction -func EncodeUnsignedTransaction(tx types.Transaction, chainID uint64, forcedNonce *uint64) ([]byte, error) { +func EncodeUnsignedTransaction(tx types.Transaction, chainID uint64, forcedNonce *uint64, forkID uint64) ([]byte, error) { v, _ := new(big.Int).SetString("0x1c", 0) r, _ := new(big.Int).SetString("0xa54492cfacf71aef702421b7fbc70636537a7b2fbe5718c5ed970a001bb7756b", 0) s, _ := new(big.Int).SetString("0x2e9fb27acc75955b898f0b12ec52aa34bf08f01db654374484b80bf12f0d841e", 0) @@ -151,6 +152,10 @@ func EncodeUnsignedTransaction(tx types.Transaction, chainID uint64, forcedNonce newSPadded := fmt.Sprintf("%064s", s.Text(hex.Base)) newVPadded := fmt.Sprintf("%02s", newV.Text(hex.Base)) effectivePercentageAsHex := fmt.Sprintf("%x", MaxEffectivePercentage) + // Only add EffectiveGasprice if forkID is equal or higher than 5 + if forkID < forkID5 { + effectivePercentageAsHex = "" + } txData, err := hex.DecodeString(hex.EncodeToString(txCodedRlp) + newRPadded + newSPadded + newVPadded + effectivePercentageAsHex) if err != nil { return nil, err @@ -332,3 +337,22 @@ func toPostgresInterval(duration string) (string, error) { return fmt.Sprintf("%s %s", duration[:len(duration)-1], pgUnit), nil } + +// CheckLogOrder checks the order of the logs. 
The order should be incremental +func CheckLogOrder(logs []*types.Log) bool { + logsAux := make([]*types.Log, len(logs)) + copy(logsAux, logs) + sort.Slice(logsAux, func(i, j int) bool { + return logsAux[i].Index < logsAux[j].Index + }) + if len(logs) != len(logsAux) { + return false + } + for i := range logs { + if logsAux[i].Index != logs[i].Index { + log.Debug("Array index: ", i, ". Index of log on each array: ", logsAux[i].Index, logs[i].Index) + return false + } + } + return true +} diff --git a/state/helper_test.go b/state/helper_test.go index f1c510d38a..2f4e1f62f7 100644 --- a/state/helper_test.go +++ b/state/helper_test.go @@ -8,6 +8,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -209,3 +210,37 @@ func TestMaliciousTransaction(t *testing.T) { require.Error(t, err) require.Equal(t, err, state.ErrInvalidData) } + +func TestCheckLogOrder(t *testing.T) { + log1 := types.Log{ + Index: 4, + Address: common.HexToAddress("0x04"), + } + log2 := types.Log{ + Index: 0, + Address: common.HexToAddress("0x00"), + } + log3 := types.Log{ + Index: 3, + Address: common.HexToAddress("0x03"), + } + log4 := types.Log{ + Index: 5, + Address: common.HexToAddress("0x05"), + } + log5 := types.Log{ + Index: 1, + Address: common.HexToAddress("0x01"), + } + log6 := types.Log{ + Index: 2, + Address: common.HexToAddress("0x02"), + } + logs := []*types.Log{&log1, &log2, &log3, &log4, &log5, &log6} + ok := state.CheckLogOrder(logs) + assert.Equal(t, false, ok) + + logs = []*types.Log{&log2, &log5, &log6, &log3, &log1, &log4} + ok = state.CheckLogOrder(logs) + assert.Equal(t, true, ok) +} diff --git a/state/pgstatestorage.go b/state/pgstatestorage.go index 83f30d244f..e0fc51105f 100644 --- a/state/pgstatestorage.go +++ b/state/pgstatestorage.go @@ -826,7 +826,7 @@ func 
scanForcedBatch(row pgx.Row) (ForcedBatch, error) { // GetEncodedTransactionsByBatchNumber returns the encoded field of all // transactions in the given batch. func (p *PostgresStorage) GetEncodedTransactionsByBatchNumber(ctx context.Context, batchNumber uint64, dbTx pgx.Tx) (encodedTxs []string, effectivePercentages []uint8, err error) { - const getEncodedTransactionsByBatchNumberSQL = "SELECT encoded, effective_percentage FROM state.transaction t INNER JOIN state.l2block b ON t.l2_block_num = b.block_num WHERE b.batch_num = $1 ORDER BY l2_block_num ASC" + const getEncodedTransactionsByBatchNumberSQL = "SELECT encoded, COALESCE(effective_percentage, 255) FROM state.transaction t INNER JOIN state.l2block b ON t.l2_block_num = b.block_num WHERE b.batch_num = $1 ORDER BY l2_block_num ASC" e := p.getExecQuerier(dbTx) rows, err := e.Query(ctx, getEncodedTransactionsByBatchNumberSQL, batchNumber) diff --git a/state/runtime/executor/client.go b/state/runtime/executor/client.go index 92c636aa27..b386d4320f 100644 --- a/state/runtime/executor/client.go +++ b/state/runtime/executor/client.go @@ -6,13 +6,12 @@ import ( "time" "github.com/0xPolygonHermez/zkevm-node/log" - "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" ) // NewExecutorClient is the executor client constructor. 
-func NewExecutorClient(ctx context.Context, c Config) (pb.ExecutorServiceClient, *grpc.ClientConn, context.CancelFunc) { +func NewExecutorClient(ctx context.Context, c Config) (ExecutorServiceClient, *grpc.ClientConn, context.CancelFunc) { opts := []grpc.DialOption{ grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(c.MaxGRPCMessageSize)), @@ -47,6 +46,6 @@ func NewExecutorClient(ctx context.Context, c Config) (pb.ExecutorServiceClient, if connectionRetries == maxRetries { log.Fatalf("fail to dial: %v", err) } - executorClient := pb.NewExecutorServiceClient(executorConn) + executorClient := NewExecutorServiceClient(executorConn) return executorClient, executorConn, cancel } diff --git a/state/runtime/executor/errors.go b/state/runtime/executor/errors.go index 0ed116c5c7..2507913f95 100644 --- a/state/runtime/executor/errors.go +++ b/state/runtime/executor/errors.go @@ -5,92 +5,6 @@ import ( "math" "github.com/0xPolygonHermez/zkevm-node/state/runtime" - "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" -) - -const ( - // ROM_ERROR_UNSPECIFIED indicates the execution ended successfully - ROM_ERROR_UNSPECIFIED int32 = iota - // ROM_ERROR_NO_ERROR indicates the execution ended successfully - ROM_ERROR_NO_ERROR - // ROM_ERROR_OUT_OF_GAS indicates there is not enough balance to continue the execution - ROM_ERROR_OUT_OF_GAS - // ROM_ERROR_STACK_OVERFLOW indicates a stack overflow has happened - ROM_ERROR_STACK_OVERFLOW - // ROM_ERROR_STACK_UNDERFLOW indicates a stack overflow has happened - ROM_ERROR_STACK_UNDERFLOW - // ROM_ERROR_MAX_CODE_SIZE_EXCEEDED indicates the code size is beyond the maximum - ROM_ERROR_MAX_CODE_SIZE_EXCEEDED - // ROM_ERROR_CONTRACT_ADDRESS_COLLISION there is a collision regarding contract addresses - ROM_ERROR_CONTRACT_ADDRESS_COLLISION - // ROM_ERROR_EXECUTION_REVERTED indicates the execution has been reverted - ROM_ERROR_EXECUTION_REVERTED - // 
ROM_ERROR_OUT_OF_COUNTERS_STEP indicates there is not enough step counters to continue the execution - ROM_ERROR_OUT_OF_COUNTERS_STEP - // ROM_ERROR_OUT_OF_COUNTERS_KECCAK indicates there is not enough keccak counters to continue the execution - ROM_ERROR_OUT_OF_COUNTERS_KECCAK - // ROM_ERROR_OUT_OF_COUNTERS_BINARY indicates there is not enough binary counters to continue the execution - ROM_ERROR_OUT_OF_COUNTERS_BINARY - // ROM_ERROR_OUT_OF_COUNTERS_MEM indicates there is not enough memory aligncounters to continue the execution - ROM_ERROR_OUT_OF_COUNTERS_MEM - // ROM_ERROR_OUT_OF_COUNTERS_ARITH indicates there is not enough arith counters to continue the execution - ROM_ERROR_OUT_OF_COUNTERS_ARITH - // ROM_ERROR_OUT_OF_COUNTERS_PADDING indicates there is not enough padding counters to continue the execution - ROM_ERROR_OUT_OF_COUNTERS_PADDING - // ROM_ERROR_OUT_OF_COUNTERS_POSEIDON indicates there is not enough poseidon counters to continue the execution - ROM_ERROR_OUT_OF_COUNTERS_POSEIDON - // ROM_ERROR_INVALID_JUMP indicates there is an invalid jump opcode - ROM_ERROR_INVALID_JUMP - // ROM_ERROR_INVALID_OPCODE indicates there is an invalid opcode - ROM_ERROR_INVALID_OPCODE - // ROM_ERROR_INVALID_STATIC indicates there is an invalid static call - ROM_ERROR_INVALID_STATIC - // ROM_ERROR_INVALID_BYTECODE_STARTS_EF indicates there is a bytecode starting with 0xEF - ROM_ERROR_INVALID_BYTECODE_STARTS_EF - // ROM_ERROR_INTRINSIC_INVALID_SIGNATURE indicates the transaction is failing at the signature intrinsic check - ROM_ERROR_INTRINSIC_INVALID_SIGNATURE - // ROM_ERROR_INTRINSIC_INVALID_CHAIN_ID indicates the transaction is failing at the chain id intrinsic check - ROM_ERROR_INTRINSIC_INVALID_CHAIN_ID - // ROM_ERROR_INTRINSIC_INVALID_NONCE indicates the transaction is failing at the nonce intrinsic check - ROM_ERROR_INTRINSIC_INVALID_NONCE - // ROM_ERROR_INTRINSIC_INVALID_GAS_LIMIT indicates the transaction is failing at the gas limit intrinsic check - 
ROM_ERROR_INTRINSIC_INVALID_GAS_LIMIT - // ROM_ERROR_INTRINSIC_INVALID_BALANCE indicates the transaction is failing at balance intrinsic check - ROM_ERROR_INTRINSIC_INVALID_BALANCE - // ROM_ERROR_INTRINSIC_INVALID_BATCH_GAS_LIMIT indicates the batch is exceeding the batch gas limit - ROM_ERROR_INTRINSIC_INVALID_BATCH_GAS_LIMIT - // ROM_ERROR_INTRINSIC_INVALID_SENDER_CODE indicates the batch is exceeding the batch gas limit - ROM_ERROR_INTRINSIC_INVALID_SENDER_CODE - // ROM_ERROR_INTRINSIC_TX_GAS_OVERFLOW indicates the transaction gasLimit*gasPrice > MAX_UINT_256 - 1 - ROM_ERROR_INTRINSIC_TX_GAS_OVERFLOW - // ROM_ERROR_BATCH_DATA_TOO_BIG indicates the batch_l2_data is too big to be processed - ROM_ERROR_BATCH_DATA_TOO_BIG - // ROM_ERROR_UNSUPPORTED_FORK_ID indicates that the fork id is not supported - ROM_ERROR_UNSUPPORTED_FORK_ID - // EXECUTOR_ERROR_UNSPECIFIED indicates the execution ended successfully - EXECUTOR_ERROR_UNSPECIFIED = 0 - // EXECUTOR_ERROR_NO_ERROR indicates there was no error - EXECUTOR_ERROR_NO_ERROR = 1 - // EXECUTOR_ERROR_COUNTERS_OVERFLOW_KECCAK indicates that the keccak counter exceeded the maximum - EXECUTOR_ERROR_COUNTERS_OVERFLOW_KECCAK = 2 - // EXECUTOR_ERROR_COUNTERS_OVERFLOW_BINARY indicates that the binary counter exceeded the maximum - EXECUTOR_ERROR_COUNTERS_OVERFLOW_BINARY = 3 - // EXECUTOR_ERROR_COUNTERS_OVERFLOW_MEM indicates that the memory align counter exceeded the maximum - EXECUTOR_ERROR_COUNTERS_OVERFLOW_MEM = 4 - // EXECUTOR_ERROR_COUNTERS_OVERFLOW_ARITH indicates that the arith counter exceeded the maximum - EXECUTOR_ERROR_COUNTERS_OVERFLOW_ARITH = 5 - // EXECUTOR_ERROR_COUNTERS_OVERFLOW_PADDING indicates that the padding counter exceeded the maximum - EXECUTOR_ERROR_COUNTERS_OVERFLOW_PADDING = 6 - // EXECUTOR_ERROR_COUNTERS_OVERFLOW_POSEIDON indicates that the poseidon counter exceeded the maximum - EXECUTOR_ERROR_COUNTERS_OVERFLOW_POSEIDON = 7 - // EXECUTOR_ERROR_UNSUPPORTED_FORK_ID indicates that the fork id is not 
supported - EXECUTOR_ERROR_UNSUPPORTED_FORK_ID = 8 - // EXECUTOR_ERROR_BALANCE_MISMATCH indicates that there is a balance mismatch error in the ROM - EXECUTOR_ERROR_BALANCE_MISMATCH = 9 - // EXECUTOR_ERROR_FEA2SCALAR indicates that there is a fea2scalar error in the execution - EXECUTOR_ERROR_FEA2SCALAR = 10 - // EXECUTOR_ERROR_TOS32 indicates that there is a TOS32 error in the execution - EXECUTOR_ERROR_TOS32 = 11 ) var ( @@ -101,226 +15,228 @@ var ( ) // RomErr returns an instance of error related to the ExecutorError -func RomErr(errorCode pb.RomError) error { - e := int32(errorCode) - switch e { - case ROM_ERROR_UNSPECIFIED: +func RomErr(errorCode RomError) error { + switch errorCode { + case RomError_ROM_ERROR_UNSPECIFIED: return fmt.Errorf("unspecified ROM error") - case ROM_ERROR_NO_ERROR: + case RomError_ROM_ERROR_NO_ERROR: return nil - case ROM_ERROR_OUT_OF_GAS: + case RomError_ROM_ERROR_OUT_OF_GAS: return runtime.ErrOutOfGas - case ROM_ERROR_STACK_OVERFLOW: + case RomError_ROM_ERROR_STACK_OVERFLOW: return runtime.ErrStackOverflow - case ROM_ERROR_STACK_UNDERFLOW: + case RomError_ROM_ERROR_STACK_UNDERFLOW: return runtime.ErrStackUnderflow - case ROM_ERROR_MAX_CODE_SIZE_EXCEEDED: + case RomError_ROM_ERROR_MAX_CODE_SIZE_EXCEEDED: return runtime.ErrMaxCodeSizeExceeded - case ROM_ERROR_CONTRACT_ADDRESS_COLLISION: + case RomError_ROM_ERROR_CONTRACT_ADDRESS_COLLISION: return runtime.ErrContractAddressCollision - case ROM_ERROR_EXECUTION_REVERTED: + case RomError_ROM_ERROR_EXECUTION_REVERTED: return runtime.ErrExecutionReverted - case ROM_ERROR_OUT_OF_COUNTERS_STEP: + case RomError_ROM_ERROR_OUT_OF_COUNTERS_STEP: return runtime.ErrOutOfCountersStep - case ROM_ERROR_OUT_OF_COUNTERS_KECCAK: + case RomError_ROM_ERROR_OUT_OF_COUNTERS_KECCAK: return runtime.ErrOutOfCountersKeccak - case ROM_ERROR_OUT_OF_COUNTERS_BINARY: + case RomError_ROM_ERROR_OUT_OF_COUNTERS_BINARY: return runtime.ErrOutOfCountersBinary - case ROM_ERROR_OUT_OF_COUNTERS_MEM: + case 
RomError_ROM_ERROR_OUT_OF_COUNTERS_MEM: return runtime.ErrOutOfCountersMemory - case ROM_ERROR_OUT_OF_COUNTERS_ARITH: + case RomError_ROM_ERROR_OUT_OF_COUNTERS_ARITH: return runtime.ErrOutOfCountersArith - case ROM_ERROR_OUT_OF_COUNTERS_PADDING: + case RomError_ROM_ERROR_OUT_OF_COUNTERS_PADDING: return runtime.ErrOutOfCountersPadding - case ROM_ERROR_OUT_OF_COUNTERS_POSEIDON: + case RomError_ROM_ERROR_OUT_OF_COUNTERS_POSEIDON: return runtime.ErrOutOfCountersPoseidon - case ROM_ERROR_INVALID_JUMP: + case RomError_ROM_ERROR_INVALID_JUMP: return runtime.ErrInvalidJump - case ROM_ERROR_INVALID_OPCODE: + case RomError_ROM_ERROR_INVALID_OPCODE: return runtime.ErrInvalidOpCode - case ROM_ERROR_INVALID_STATIC: + case RomError_ROM_ERROR_INVALID_STATIC: return runtime.ErrInvalidStatic - case ROM_ERROR_INVALID_BYTECODE_STARTS_EF: + case RomError_ROM_ERROR_INVALID_BYTECODE_STARTS_EF: return runtime.ErrInvalidByteCodeStartsEF - case ROM_ERROR_INTRINSIC_INVALID_SIGNATURE: + case RomError_ROM_ERROR_INTRINSIC_INVALID_SIGNATURE: return runtime.ErrIntrinsicInvalidSignature - case ROM_ERROR_INTRINSIC_INVALID_CHAIN_ID: + case RomError_ROM_ERROR_INTRINSIC_INVALID_CHAIN_ID: return runtime.ErrIntrinsicInvalidChainID - case ROM_ERROR_INTRINSIC_INVALID_NONCE: + case RomError_ROM_ERROR_INTRINSIC_INVALID_NONCE: return runtime.ErrIntrinsicInvalidNonce - case ROM_ERROR_INTRINSIC_INVALID_GAS_LIMIT: + case RomError_ROM_ERROR_INTRINSIC_INVALID_GAS_LIMIT: return runtime.ErrIntrinsicInvalidGasLimit - case ROM_ERROR_INTRINSIC_INVALID_BALANCE: + case RomError_ROM_ERROR_INTRINSIC_INVALID_BALANCE: return runtime.ErrIntrinsicInvalidBalance - case ROM_ERROR_INTRINSIC_INVALID_BATCH_GAS_LIMIT: + case RomError_ROM_ERROR_INTRINSIC_INVALID_BATCH_GAS_LIMIT: return runtime.ErrIntrinsicInvalidBatchGasLimit - case ROM_ERROR_INTRINSIC_INVALID_SENDER_CODE: + case RomError_ROM_ERROR_INTRINSIC_INVALID_SENDER_CODE: return runtime.ErrIntrinsicInvalidSenderCode - case ROM_ERROR_INTRINSIC_TX_GAS_OVERFLOW: + case 
RomError_ROM_ERROR_INTRINSIC_TX_GAS_OVERFLOW: return runtime.ErrIntrinsicInvalidTxGasOverflow - case ROM_ERROR_BATCH_DATA_TOO_BIG: + case RomError_ROM_ERROR_BATCH_DATA_TOO_BIG: return runtime.ErrBatchDataTooBig - case ROM_ERROR_UNSUPPORTED_FORK_ID: + case RomError_ROM_ERROR_UNSUPPORTED_FORK_ID: return runtime.ErrUnsupportedForkId + case RomError_ROM_ERROR_INVALID_RLP: + return runtime.ErrInvalidRLP } return fmt.Errorf("unknown error") } // RomErrorCode returns the error code for a given error -func RomErrorCode(err error) pb.RomError { +func RomErrorCode(err error) RomError { switch err { case nil: - return pb.RomError(ROM_ERROR_NO_ERROR) + return RomError_ROM_ERROR_NO_ERROR case runtime.ErrOutOfGas: - return pb.RomError(ROM_ERROR_OUT_OF_GAS) + return RomError_ROM_ERROR_OUT_OF_GAS case runtime.ErrStackOverflow: - return pb.RomError(ROM_ERROR_STACK_OVERFLOW) + return RomError_ROM_ERROR_STACK_OVERFLOW case runtime.ErrStackUnderflow: - return pb.RomError(ROM_ERROR_STACK_UNDERFLOW) + return RomError_ROM_ERROR_STACK_UNDERFLOW case runtime.ErrMaxCodeSizeExceeded: - return pb.RomError(ROM_ERROR_MAX_CODE_SIZE_EXCEEDED) + return RomError_ROM_ERROR_MAX_CODE_SIZE_EXCEEDED case runtime.ErrContractAddressCollision: - return pb.RomError(ROM_ERROR_CONTRACT_ADDRESS_COLLISION) + return RomError_ROM_ERROR_CONTRACT_ADDRESS_COLLISION case runtime.ErrExecutionReverted: - return pb.RomError(ROM_ERROR_EXECUTION_REVERTED) + return RomError_ROM_ERROR_EXECUTION_REVERTED case runtime.ErrOutOfCountersStep: - return pb.RomError(ROM_ERROR_OUT_OF_COUNTERS_STEP) + return RomError_ROM_ERROR_OUT_OF_COUNTERS_STEP case runtime.ErrOutOfCountersKeccak: - return pb.RomError(ROM_ERROR_OUT_OF_COUNTERS_KECCAK) + return RomError_ROM_ERROR_OUT_OF_COUNTERS_KECCAK case runtime.ErrOutOfCountersBinary: - return pb.RomError(ROM_ERROR_OUT_OF_COUNTERS_BINARY) + return RomError_ROM_ERROR_OUT_OF_COUNTERS_BINARY case runtime.ErrOutOfCountersMemory: - return pb.RomError(ROM_ERROR_OUT_OF_COUNTERS_MEM) + return 
RomError_ROM_ERROR_OUT_OF_COUNTERS_MEM case runtime.ErrOutOfCountersArith: - return pb.RomError(ROM_ERROR_OUT_OF_COUNTERS_ARITH) + return RomError_ROM_ERROR_OUT_OF_COUNTERS_ARITH case runtime.ErrOutOfCountersPadding: - return pb.RomError(ROM_ERROR_OUT_OF_COUNTERS_PADDING) + return RomError_ROM_ERROR_OUT_OF_COUNTERS_PADDING case runtime.ErrOutOfCountersPoseidon: - return pb.RomError(ROM_ERROR_OUT_OF_COUNTERS_POSEIDON) + return RomError_ROM_ERROR_OUT_OF_COUNTERS_POSEIDON case runtime.ErrInvalidJump: - return pb.RomError(ROM_ERROR_INVALID_JUMP) + return RomError_ROM_ERROR_INVALID_JUMP case runtime.ErrInvalidOpCode: - return pb.RomError(ROM_ERROR_INVALID_OPCODE) + return RomError_ROM_ERROR_INVALID_OPCODE case runtime.ErrInvalidStatic: - return pb.RomError(ROM_ERROR_INVALID_STATIC) + return RomError_ROM_ERROR_INVALID_STATIC case runtime.ErrInvalidByteCodeStartsEF: - return pb.RomError(ROM_ERROR_INVALID_BYTECODE_STARTS_EF) + return RomError_ROM_ERROR_INVALID_BYTECODE_STARTS_EF case runtime.ErrIntrinsicInvalidSignature: - return pb.RomError(ROM_ERROR_INTRINSIC_INVALID_SIGNATURE) + return RomError_ROM_ERROR_INTRINSIC_INVALID_SIGNATURE case runtime.ErrIntrinsicInvalidChainID: - return pb.RomError(ROM_ERROR_INTRINSIC_INVALID_CHAIN_ID) + return RomError_ROM_ERROR_INTRINSIC_INVALID_CHAIN_ID case runtime.ErrIntrinsicInvalidNonce: - return pb.RomError(ROM_ERROR_INTRINSIC_INVALID_NONCE) + return RomError_ROM_ERROR_INTRINSIC_INVALID_NONCE case runtime.ErrIntrinsicInvalidGasLimit: - return pb.RomError(ROM_ERROR_INTRINSIC_INVALID_GAS_LIMIT) + return RomError_ROM_ERROR_INTRINSIC_INVALID_GAS_LIMIT case runtime.ErrIntrinsicInvalidBalance: - return pb.RomError(ROM_ERROR_INTRINSIC_INVALID_BALANCE) + return RomError_ROM_ERROR_INTRINSIC_INVALID_BALANCE case runtime.ErrIntrinsicInvalidBatchGasLimit: - return pb.RomError(ROM_ERROR_INTRINSIC_INVALID_BATCH_GAS_LIMIT) + return RomError_ROM_ERROR_INTRINSIC_INVALID_BATCH_GAS_LIMIT case runtime.ErrIntrinsicInvalidSenderCode: - return 
pb.RomError(ROM_ERROR_INTRINSIC_INVALID_SENDER_CODE) + return RomError_ROM_ERROR_INTRINSIC_INVALID_SENDER_CODE case runtime.ErrIntrinsicInvalidTxGasOverflow: - return pb.RomError(ROM_ERROR_INTRINSIC_TX_GAS_OVERFLOW) + return RomError_ROM_ERROR_INTRINSIC_TX_GAS_OVERFLOW case runtime.ErrBatchDataTooBig: - return pb.RomError(ROM_ERROR_BATCH_DATA_TOO_BIG) + return RomError_ROM_ERROR_BATCH_DATA_TOO_BIG case runtime.ErrUnsupportedForkId: - return pb.RomError(ROM_ERROR_UNSUPPORTED_FORK_ID) + return RomError_ROM_ERROR_UNSUPPORTED_FORK_ID + case runtime.ErrInvalidRLP: + return RomError_ROM_ERROR_INVALID_RLP } return math.MaxInt32 } // IsROMOutOfCountersError indicates if the error is an ROM OOC -func IsROMOutOfCountersError(error pb.RomError) bool { - return int32(error) >= ROM_ERROR_OUT_OF_COUNTERS_STEP && int32(error) <= ROM_ERROR_OUT_OF_COUNTERS_POSEIDON +func IsROMOutOfCountersError(error RomError) bool { + return error >= RomError_ROM_ERROR_OUT_OF_COUNTERS_STEP && error <= RomError_ROM_ERROR_OUT_OF_COUNTERS_POSEIDON } // IsROMOutOfGasError indicates if the error is an ROM OOG -func IsROMOutOfGasError(error pb.RomError) bool { - return int32(error) == ROM_ERROR_OUT_OF_GAS +func IsROMOutOfGasError(error RomError) bool { + return error == RomError_ROM_ERROR_OUT_OF_GAS } // IsExecutorOutOfCountersError indicates if the error is an ROM OOC -func IsExecutorOutOfCountersError(error pb.ExecutorError) bool { - return int32(error) >= EXECUTOR_ERROR_COUNTERS_OVERFLOW_KECCAK && int32(error) <= ROM_ERROR_OUT_OF_COUNTERS_POSEIDON +func IsExecutorOutOfCountersError(error ExecutorError) bool { + return error >= ExecutorError_EXECUTOR_ERROR_COUNTERS_OVERFLOW_KECCAK && error <= ExecutorError_EXECUTOR_ERROR_COUNTERS_OVERFLOW_POSEIDON } // IsExecutorUnspecifiedError indicates an unspecified error in the executor -func IsExecutorUnspecifiedError(error pb.ExecutorError) bool { - return int32(error) == EXECUTOR_ERROR_UNSPECIFIED +func IsExecutorUnspecifiedError(error ExecutorError) bool { + 
return error == ExecutorError_EXECUTOR_ERROR_UNSPECIFIED } // IsIntrinsicError indicates if the error is due to a intrinsic check -func IsIntrinsicError(error pb.RomError) bool { - return int32(error) >= ROM_ERROR_INTRINSIC_INVALID_SIGNATURE && int32(error) <= ROM_ERROR_INTRINSIC_TX_GAS_OVERFLOW +func IsIntrinsicError(error RomError) bool { + return error >= RomError_ROM_ERROR_INTRINSIC_INVALID_SIGNATURE && error <= RomError_ROM_ERROR_INTRINSIC_TX_GAS_OVERFLOW } // IsInvalidNonceError indicates if the error is due to a invalid nonce -func IsInvalidNonceError(error pb.RomError) bool { - return int32(error) == ROM_ERROR_INTRINSIC_INVALID_NONCE +func IsInvalidNonceError(error RomError) bool { + return error == RomError_ROM_ERROR_INTRINSIC_INVALID_NONCE } // IsInvalidBalanceError indicates if the error is due to a invalid balance -func IsInvalidBalanceError(error pb.RomError) bool { - return int32(error) == ROM_ERROR_INTRINSIC_INVALID_BALANCE +func IsInvalidBalanceError(error RomError) bool { + return error == RomError_ROM_ERROR_INTRINSIC_INVALID_BALANCE } // ExecutorErr returns an instance of error related to the ExecutorError -func ExecutorErr(errorCode pb.ExecutorError) error { - e := int32(errorCode) - switch e { - case EXECUTOR_ERROR_UNSPECIFIED: +func ExecutorErr(errorCode ExecutorError) error { + switch errorCode { + case ExecutorError_EXECUTOR_ERROR_UNSPECIFIED: return ErrUnspecified - case EXECUTOR_ERROR_NO_ERROR: + case ExecutorError_EXECUTOR_ERROR_NO_ERROR: return nil - case EXECUTOR_ERROR_COUNTERS_OVERFLOW_KECCAK: + case ExecutorError_EXECUTOR_ERROR_COUNTERS_OVERFLOW_KECCAK: return runtime.ErrOutOfCountersKeccak - case EXECUTOR_ERROR_COUNTERS_OVERFLOW_BINARY: + case ExecutorError_EXECUTOR_ERROR_COUNTERS_OVERFLOW_BINARY: return runtime.ErrOutOfCountersBinary - case EXECUTOR_ERROR_COUNTERS_OVERFLOW_MEM: + case ExecutorError_EXECUTOR_ERROR_COUNTERS_OVERFLOW_MEM: return runtime.ErrOutOfCountersMemory - case EXECUTOR_ERROR_COUNTERS_OVERFLOW_ARITH: + case 
ExecutorError_EXECUTOR_ERROR_COUNTERS_OVERFLOW_ARITH: return runtime.ErrOutOfCountersArith - case EXECUTOR_ERROR_COUNTERS_OVERFLOW_PADDING: + case ExecutorError_EXECUTOR_ERROR_COUNTERS_OVERFLOW_PADDING: return runtime.ErrOutOfCountersPadding - case EXECUTOR_ERROR_COUNTERS_OVERFLOW_POSEIDON: + case ExecutorError_EXECUTOR_ERROR_COUNTERS_OVERFLOW_POSEIDON: return runtime.ErrOutOfCountersPoseidon - case EXECUTOR_ERROR_UNSUPPORTED_FORK_ID: + case ExecutorError_EXECUTOR_ERROR_UNSUPPORTED_FORK_ID: return runtime.ErrUnsupportedForkId - case EXECUTOR_ERROR_BALANCE_MISMATCH: + case ExecutorError_EXECUTOR_ERROR_BALANCE_MISMATCH: return runtime.ErrBalanceMismatch - case EXECUTOR_ERROR_FEA2SCALAR: + case ExecutorError_EXECUTOR_ERROR_FEA2SCALAR: return runtime.ErrFea2Scalar - case EXECUTOR_ERROR_TOS32: + case ExecutorError_EXECUTOR_ERROR_TOS32: return runtime.ErrTos32 } return ErrUnknown } // ExecutorErrorCode returns the error code for a given error -func ExecutorErrorCode(err error) pb.ExecutorError { +func ExecutorErrorCode(err error) ExecutorError { switch err { case nil: - return pb.ExecutorError(EXECUTOR_ERROR_NO_ERROR) + return ExecutorError_EXECUTOR_ERROR_NO_ERROR case runtime.ErrOutOfCountersKeccak: - return pb.ExecutorError(EXECUTOR_ERROR_COUNTERS_OVERFLOW_KECCAK) + return ExecutorError_EXECUTOR_ERROR_COUNTERS_OVERFLOW_KECCAK case runtime.ErrOutOfCountersBinary: - return pb.ExecutorError(EXECUTOR_ERROR_COUNTERS_OVERFLOW_BINARY) + return ExecutorError_EXECUTOR_ERROR_COUNTERS_OVERFLOW_BINARY case runtime.ErrOutOfCountersMemory: - return pb.ExecutorError(EXECUTOR_ERROR_COUNTERS_OVERFLOW_MEM) + return ExecutorError_EXECUTOR_ERROR_COUNTERS_OVERFLOW_MEM case runtime.ErrOutOfCountersArith: - return pb.ExecutorError(EXECUTOR_ERROR_COUNTERS_OVERFLOW_ARITH) + return ExecutorError_EXECUTOR_ERROR_COUNTERS_OVERFLOW_ARITH case runtime.ErrOutOfCountersPadding: - return pb.ExecutorError(EXECUTOR_ERROR_COUNTERS_OVERFLOW_PADDING) + return 
ExecutorError_EXECUTOR_ERROR_COUNTERS_OVERFLOW_PADDING case runtime.ErrOutOfCountersPoseidon: - return pb.ExecutorError(EXECUTOR_ERROR_COUNTERS_OVERFLOW_POSEIDON) + return ExecutorError_EXECUTOR_ERROR_COUNTERS_OVERFLOW_POSEIDON case runtime.ErrUnsupportedForkId: - return pb.ExecutorError(EXECUTOR_ERROR_UNSUPPORTED_FORK_ID) + return ExecutorError_EXECUTOR_ERROR_UNSUPPORTED_FORK_ID case runtime.ErrBalanceMismatch: - return pb.ExecutorError(EXECUTOR_ERROR_BALANCE_MISMATCH) + return ExecutorError_EXECUTOR_ERROR_BALANCE_MISMATCH case runtime.ErrFea2Scalar: - return pb.ExecutorError(EXECUTOR_ERROR_FEA2SCALAR) + return ExecutorError_EXECUTOR_ERROR_FEA2SCALAR case runtime.ErrTos32: - return pb.ExecutorError(EXECUTOR_ERROR_TOS32) + return ExecutorError_EXECUTOR_ERROR_TOS32 } return math.MaxInt32 } diff --git a/state/runtime/executor/pb/executor.pb.go b/state/runtime/executor/executor.pb.go similarity index 95% rename from state/runtime/executor/pb/executor.pb.go rename to state/runtime/executor/executor.pb.go index 5f10435674..08f8a33b22 100644 --- a/state/runtime/executor/pb/executor.pb.go +++ b/state/runtime/executor/executor.pb.go @@ -4,7 +4,7 @@ // protoc v3.21.12 // source: executor.proto -package pb +package executor import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" @@ -81,6 +81,8 @@ const ( RomError_ROM_ERROR_BATCH_DATA_TOO_BIG RomError = 27 // ROM_ERROR_UNSUPPORTED_FORK_ID indicates that the fork id is not supported RomError_ROM_ERROR_UNSUPPORTED_FORK_ID RomError = 28 + // ROM_ERROR_INVALID_RLP indicates that there has been an error while parsing the RLP + RomError_ROM_ERROR_INVALID_RLP RomError = 29 ) // Enum value maps for RomError. 
@@ -115,6 +117,7 @@ var ( 26: "ROM_ERROR_INTRINSIC_TX_GAS_OVERFLOW", 27: "ROM_ERROR_BATCH_DATA_TOO_BIG", 28: "ROM_ERROR_UNSUPPORTED_FORK_ID", + 29: "ROM_ERROR_INVALID_RLP", } RomError_value = map[string]int32{ "ROM_ERROR_UNSPECIFIED": 0, @@ -146,6 +149,7 @@ var ( "ROM_ERROR_INTRINSIC_TX_GAS_OVERFLOW": 26, "ROM_ERROR_BATCH_DATA_TOO_BIG": 27, "ROM_ERROR_UNSUPPORTED_FORK_ID": 28, + "ROM_ERROR_INVALID_RLP": 29, } ) @@ -2065,7 +2069,7 @@ var file_executor_proto_rawDesc = []byte{ 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x2a, 0xab, 0x08, 0x0a, 0x08, 0x52, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, + 0x2a, 0xc6, 0x08, 0x0a, 0x08, 0x52, 0x6f, 0x6d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x19, 0x0a, 0x15, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4e, 0x4f, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x01, @@ -2131,54 +2135,55 @@ var file_executor_proto_rawDesc = []byte{ 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x42, 0x41, 0x54, 0x43, 0x48, 0x5f, 0x44, 0x41, 0x54, 0x41, 0x5f, 0x54, 0x4f, 0x4f, 0x5f, 0x42, 0x49, 0x47, 0x10, 0x1b, 0x12, 0x21, 0x0a, 0x1d, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, - 0x52, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x4b, 0x5f, 0x49, 0x44, 0x10, 0x1c, 0x2a, 0xdf, - 0x03, 0x0a, 0x0d, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, - 0x12, 0x1e, 0x0a, 0x1a, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, - 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, - 
0x12, 0x1b, 0x0a, 0x17, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, - 0x4f, 0x52, 0x5f, 0x4e, 0x4f, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x01, 0x12, 0x2b, 0x0a, - 0x27, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, - 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, - 0x57, 0x5f, 0x4b, 0x45, 0x43, 0x43, 0x41, 0x4b, 0x10, 0x02, 0x12, 0x2b, 0x0a, 0x27, 0x45, 0x58, - 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x55, - 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x42, - 0x49, 0x4e, 0x41, 0x52, 0x59, 0x10, 0x03, 0x12, 0x28, 0x0a, 0x24, 0x45, 0x58, 0x45, 0x43, 0x55, - 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, - 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x4d, 0x45, 0x4d, 0x10, - 0x04, 0x12, 0x2a, 0x0a, 0x26, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, - 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, - 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x41, 0x52, 0x49, 0x54, 0x48, 0x10, 0x05, 0x12, 0x2c, 0x0a, - 0x28, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, - 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, - 0x57, 0x5f, 0x50, 0x41, 0x44, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x06, 0x12, 0x2d, 0x0a, 0x29, 0x45, - 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, - 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, - 0x50, 0x4f, 0x53, 0x45, 0x49, 0x44, 0x4f, 0x4e, 0x10, 0x07, 0x12, 0x26, 0x0a, 0x22, 0x45, 0x58, - 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, - 0x55, 0x50, 0x50, 0x4f, 0x52, 0x54, 
0x45, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x4b, 0x5f, 0x49, 0x44, - 0x10, 0x08, 0x12, 0x23, 0x0a, 0x1f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, - 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x5f, 0x4d, 0x49, 0x53, - 0x4d, 0x41, 0x54, 0x43, 0x48, 0x10, 0x09, 0x12, 0x1d, 0x0a, 0x19, 0x45, 0x58, 0x45, 0x43, 0x55, - 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x46, 0x45, 0x41, 0x32, 0x53, 0x43, - 0x41, 0x4c, 0x41, 0x52, 0x10, 0x0a, 0x12, 0x18, 0x0a, 0x14, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, - 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x54, 0x4f, 0x53, 0x33, 0x32, 0x10, 0x0b, - 0x32, 0xb9, 0x01, 0x0a, 0x0f, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, - 0x76, 0x69, 0x63, 0x65, 0x12, 0x55, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x42, - 0x61, 0x74, 0x63, 0x68, 0x12, 0x20, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, - 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, - 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x42, 0x61, 0x74, 0x63, - 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0e, 0x47, - 0x65, 0x74, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x23, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, - 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x41, 0x5a, 0x3f, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x30, 0x78, 0x50, 0x6f, 0x6c, - 0x79, 0x67, 0x6f, 0x6e, 0x48, 0x65, 0x72, 0x6d, 0x65, 0x7a, 0x2f, 0x7a, 
0x6b, 0x65, 0x76, 0x6d, - 0x2d, 0x6e, 0x6f, 0x64, 0x65, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2f, 0x72, 0x75, 0x6e, 0x74, - 0x69, 0x6d, 0x65, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2f, 0x70, 0x62, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x52, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x4b, 0x5f, 0x49, 0x44, 0x10, 0x1c, 0x12, 0x19, + 0x0a, 0x15, 0x52, 0x4f, 0x4d, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x49, 0x4e, 0x56, 0x41, + 0x4c, 0x49, 0x44, 0x5f, 0x52, 0x4c, 0x50, 0x10, 0x1d, 0x2a, 0xdf, 0x03, 0x0a, 0x0d, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x45, + 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, + 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x45, + 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x4e, 0x4f, + 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x01, 0x12, 0x2b, 0x0a, 0x27, 0x45, 0x58, 0x45, 0x43, + 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, + 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x4b, 0x45, 0x43, + 0x43, 0x41, 0x4b, 0x10, 0x02, 0x12, 0x2b, 0x0a, 0x27, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, + 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, + 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x42, 0x49, 0x4e, 0x41, 0x52, 0x59, + 0x10, 0x03, 0x12, 0x28, 0x0a, 0x24, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, + 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, + 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x4d, 0x45, 0x4d, 0x10, 0x04, 0x12, 0x2a, 0x0a, 0x26, + 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, + 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 
0x52, 0x46, 0x4c, 0x4f, 0x57, + 0x5f, 0x41, 0x52, 0x49, 0x54, 0x48, 0x10, 0x05, 0x12, 0x2c, 0x0a, 0x28, 0x45, 0x58, 0x45, 0x43, + 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, + 0x45, 0x52, 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x41, 0x44, + 0x44, 0x49, 0x4e, 0x47, 0x10, 0x06, 0x12, 0x2d, 0x0a, 0x29, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, + 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, + 0x53, 0x5f, 0x4f, 0x56, 0x45, 0x52, 0x46, 0x4c, 0x4f, 0x57, 0x5f, 0x50, 0x4f, 0x53, 0x45, 0x49, + 0x44, 0x4f, 0x4e, 0x10, 0x07, 0x12, 0x26, 0x0a, 0x22, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, + 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x55, 0x50, 0x50, 0x4f, 0x52, + 0x54, 0x45, 0x44, 0x5f, 0x46, 0x4f, 0x52, 0x4b, 0x5f, 0x49, 0x44, 0x10, 0x08, 0x12, 0x23, 0x0a, + 0x1f, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x5f, + 0x42, 0x41, 0x4c, 0x41, 0x4e, 0x43, 0x45, 0x5f, 0x4d, 0x49, 0x53, 0x4d, 0x41, 0x54, 0x43, 0x48, + 0x10, 0x09, 0x12, 0x1d, 0x0a, 0x19, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, + 0x52, 0x52, 0x4f, 0x52, 0x5f, 0x46, 0x45, 0x41, 0x32, 0x53, 0x43, 0x41, 0x4c, 0x41, 0x52, 0x10, + 0x0a, 0x12, 0x18, 0x0a, 0x14, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x4f, 0x52, 0x5f, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x5f, 0x54, 0x4f, 0x53, 0x33, 0x32, 0x10, 0x0b, 0x32, 0xb9, 0x01, 0x0a, 0x0f, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, + 0x55, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, + 0x20, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x21, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, + 0x50, 
0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4f, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x46, 0x6c, 0x75, + 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x1a, 0x23, 0x2e, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x47, + 0x65, 0x74, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x30, 0x78, 0x50, 0x6f, 0x6c, 0x79, 0x67, 0x6f, 0x6e, 0x48, + 0x65, 0x72, 0x6d, 0x65, 0x7a, 0x2f, 0x7a, 0x6b, 0x65, 0x76, 0x6d, 0x2d, 0x6e, 0x6f, 0x64, 0x65, + 0x2f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/state/runtime/executor/pb/executor_grpc.pb.go b/state/runtime/executor/executor_grpc.pb.go similarity index 99% rename from state/runtime/executor/pb/executor_grpc.pb.go rename to state/runtime/executor/executor_grpc.pb.go index eed451198a..29b51509db 100644 --- a/state/runtime/executor/pb/executor_grpc.pb.go +++ b/state/runtime/executor/executor_grpc.pb.go @@ -4,7 +4,7 @@ // - protoc v3.21.12 // source: executor.proto -package pb +package executor import ( context "context" diff --git a/state/runtime/runtime.go b/state/runtime/runtime.go index 05936c0bb4..b452d87181 100644 --- a/state/runtime/runtime.go +++ b/state/runtime/runtime.go @@ -63,6 +63,8 @@ var ( ErrIntrinsicInvalidTxGasOverflow = errors.New("gas overflow") // ErrUnsupportedForkId indicates that the fork id is not supported ErrUnsupportedForkId = errors.New("unsupported fork id") + // ErrInvalidRLP indicates that there has been an error 
while parsing the RLP + ErrInvalidRLP = errors.New("invalid RLP") // ErrBalanceMismatch indicates that the balance mismatch in the ROM ErrBalanceMismatch = errors.New("balance mismatch") // ErrFea2Scalar indicates a fea2scalar error in the ROM diff --git a/state/state.go b/state/state.go index 17c69232b6..caeb538759 100644 --- a/state/state.go +++ b/state/state.go @@ -8,7 +8,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/event" "github.com/0xPolygonHermez/zkevm-node/merkletree" "github.com/0xPolygonHermez/zkevm-node/state/metrics" - "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" + "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" "github.com/jackc/pgx/v4" @@ -26,7 +26,7 @@ var ( type State struct { cfg Config *PostgresStorage - executorClient pb.ExecutorServiceClient + executorClient executor.ExecutorServiceClient tree *merkletree.StateTree eventLog *event.EventLog @@ -36,7 +36,7 @@ type State struct { } // NewState creates a new State -func NewState(cfg Config, storage *PostgresStorage, executorClient pb.ExecutorServiceClient, stateTree *merkletree.StateTree, eventLog *event.EventLog) *State { +func NewState(cfg Config, storage *PostgresStorage, executorClient executor.ExecutorServiceClient, stateTree *merkletree.StateTree, eventLog *event.EventLog) *State { var once sync.Once once.Do(func() { metrics.Register() diff --git a/state/state_test.go b/state/state_test.go index 864ab98476..d02ed66227 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -26,7 +26,6 @@ import ( "github.com/0xPolygonHermez/zkevm-node/state/metrics" "github.com/0xPolygonHermez/zkevm-node/state/runtime" "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" - executorclientpb "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" "github.com/0xPolygonHermez/zkevm-node/test/constants" 
"github.com/0xPolygonHermez/zkevm-node/test/contracts/bin/Counter" "github.com/0xPolygonHermez/zkevm-node/test/dbutils" @@ -67,7 +66,7 @@ var ( }}, } forkID uint64 = 5 - executorClient executorclientpb.ExecutorServiceClient + executorClient executor.ExecutorServiceClient mtDBServiceClient mtDBclientpb.HashDBServiceClient executorClientConn, mtDBClientConn *grpc.ClientConn batchResources = state.BatchResources{ @@ -510,7 +509,7 @@ func TestExecuteTransaction(t *testing.T) { require.NoError(t, err) // Create Batch - processBatchRequest := &executorclientpb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ OldBatchNum: 0, Coinbase: sequencerAddress.String(), BatchL2Data: batchL2Data, @@ -743,7 +742,7 @@ func TestExecutor(t *testing.T) { } // Create Batch - processBatchRequest := &executorclientpb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ OldBatchNum: 0, Coinbase: common.HexToAddress("0x617b3a3528F9cDd6630fd3301B9c8911F7Bf063D").String(), BatchL2Data: common.Hex2Bytes("ee80843b9aca00830186a0944d5cf5032b2a844602278b01199ed191a86c93ff88016345785d8a0000808203e880801cee7e01dc62f69a12c3510c6d64de04ee6346d84b6a017f3e786c7d87f963e75d8cc91fa983cd6d9cf55fff80d73bd26cd333b0f098acc1e58edb1fd484ad731bff"), @@ -824,7 +823,7 @@ func TestExecutorRevert(t *testing.T) { require.NoError(t, err) // Create Batch - processBatchRequest := &executorclientpb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ OldBatchNum: 0, Coinbase: sequencerAddress.String(), BatchL2Data: batchL2Data, @@ -947,7 +946,7 @@ func TestExecutorRevert(t *testing.T) { // require.NoError(t, err) // // // Create Batch -// processBatchRequest := &executorclientpb.ProcessBatchRequest{ +// processBatchRequest := &executor.ProcessBatchRequest{ // OldBatchNum: 0, // Coinbase: sequencerAddress.String(), // BatchL2Data: batchL2Data, @@ -1028,7 +1027,7 @@ func TestExecutorTransfer(t *testing.T) { require.NoError(t, err) // Create Batch - 
processBatchRequest := &executorclientpb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ OldBatchNum: 0, Coinbase: receiverAddress.String(), BatchL2Data: batchL2Data, @@ -1175,7 +1174,7 @@ func TestExecutorTxHashAndRLP(t *testing.T) { require.NoError(t, err) // Create Batch - processBatchRequest := &executorclientpb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ OldBatchNum: uint64(x), Coinbase: receiverAddress.String(), BatchL2Data: batchL2Data, @@ -1284,7 +1283,7 @@ func TestExecutorInvalidNonce(t *testing.T) { require.NoError(t, err) // Create Batch - processBatchRequest := &executorclientpb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ OldBatchNum: 0, Coinbase: receiverAddress.String(), BatchL2Data: batchL2Data, @@ -1811,7 +1810,7 @@ func TestExecutorUniswapOutOfCounters(t *testing.T) { require.NoError(t, err) // Create Batch - processBatchRequest := &executorclientpb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ BatchNum: numBatch, Coinbase: common.Address{}.String(), BatchL2Data: batchL2Data, @@ -1884,7 +1883,7 @@ func TestExecutorUniswapOutOfCounters(t *testing.T) { require.NoError(t, err) // Create Batch - processBatchRequest := &executorclientpb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ BatchNum: numBatch, Coinbase: common.Address{}.String(), BatchL2Data: batchL2Data, @@ -1911,7 +1910,7 @@ func TestExecutorUniswapOutOfCounters(t *testing.T) { require.NoError(t, err) // Create Batch - processBatchRequest := &executorclientpb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ BatchNum: numBatch, Coinbase: common.Address{}.String(), BatchL2Data: batchL2Data, @@ -2021,7 +2020,7 @@ func TestExecutorEstimateGas(t *testing.T) { require.NoError(t, err) // Create Batch - processBatchRequest := &executorclientpb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ 
OldBatchNum: 0, Coinbase: sequencerAddress.String(), BatchL2Data: batchL2Data, @@ -2171,7 +2170,7 @@ func TestExecutorGasRefund(t *testing.T) { require.NoError(t, err) // Create Batch - processBatchRequest := &executorclientpb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ BatchNum: 1, Coinbase: sequencerAddress.String(), BatchL2Data: batchL2Data, @@ -2237,7 +2236,7 @@ func TestExecutorGasRefund(t *testing.T) { batchL2Data, err = state.EncodeTransactions([]types.Transaction{*signedTx2}) require.NoError(t, err) - processBatchRequest = &executorclientpb.ProcessBatchRequest{ + processBatchRequest = &executor.ProcessBatchRequest{ BatchNum: 2, Coinbase: sequencerAddress.String(), BatchL2Data: batchL2Data, @@ -2375,7 +2374,7 @@ func TestExecutorGasEstimationMultisig(t *testing.T) { require.NoError(t, err) // Create Batch - processBatchRequest := &executorclientpb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ OldBatchNum: 0, Coinbase: sequencerAddress.String(), BatchL2Data: batchL2Data, @@ -2390,12 +2389,12 @@ func TestExecutorGasEstimationMultisig(t *testing.T) { processBatchResponse, err := executorClient.ProcessBatch(ctx, processBatchRequest) require.NoError(t, err) - assert.Equal(t, executorclientpb.RomError_ROM_ERROR_NO_ERROR, processBatchResponse.Responses[0].Error) - assert.Equal(t, executorclientpb.RomError_ROM_ERROR_NO_ERROR, processBatchResponse.Responses[1].Error) - assert.Equal(t, executorclientpb.RomError_ROM_ERROR_NO_ERROR, processBatchResponse.Responses[2].Error) - assert.Equal(t, executorclientpb.RomError_ROM_ERROR_NO_ERROR, processBatchResponse.Responses[3].Error) - assert.Equal(t, executorclientpb.RomError_ROM_ERROR_NO_ERROR, processBatchResponse.Responses[4].Error) - assert.Equal(t, executorclientpb.RomError_ROM_ERROR_NO_ERROR, processBatchResponse.Responses[5].Error) + assert.Equal(t, executor.RomError_ROM_ERROR_NO_ERROR, processBatchResponse.Responses[0].Error) + assert.Equal(t, 
executor.RomError_ROM_ERROR_NO_ERROR, processBatchResponse.Responses[1].Error) + assert.Equal(t, executor.RomError_ROM_ERROR_NO_ERROR, processBatchResponse.Responses[2].Error) + assert.Equal(t, executor.RomError_ROM_ERROR_NO_ERROR, processBatchResponse.Responses[3].Error) + assert.Equal(t, executor.RomError_ROM_ERROR_NO_ERROR, processBatchResponse.Responses[4].Error) + assert.Equal(t, executor.RomError_ROM_ERROR_NO_ERROR, processBatchResponse.Responses[5].Error) // Check SC code // Check Smart Contracts Code @@ -2462,7 +2461,7 @@ func TestExecutorGasEstimationMultisig(t *testing.T) { batchL2Data, err = state.EncodeTransactions([]types.Transaction{*signedTx6}, constants.EffectivePercentage, forkID) require.NoError(t, err) - processBatchRequest = &executorclientpb.ProcessBatchRequest{ + processBatchRequest = &executor.ProcessBatchRequest{ OldBatchNum: 1, Coinbase: sequencerAddress.String(), BatchL2Data: batchL2Data, @@ -2477,7 +2476,7 @@ func TestExecutorGasEstimationMultisig(t *testing.T) { processBatchResponse, err = executorClient.ProcessBatch(ctx, processBatchRequest) require.NoError(t, err) - assert.Equal(t, executorclientpb.RomError_ROM_ERROR_NO_ERROR, processBatchResponse.Responses[0].Error) + assert.Equal(t, executor.RomError_ROM_ERROR_NO_ERROR, processBatchResponse.Responses[0].Error) log.Debugf("Used gas = %v", processBatchResponse.Responses[0].GasUsed) } @@ -2519,7 +2518,7 @@ func TestExecuteWithoutUpdatingMT(t *testing.T) { require.NoError(t, err) // Create Batch - processBatchRequest := &executorclientpb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ OldBatchNum: 0, Coinbase: sequencerAddress.String(), BatchL2Data: batchL2Data, @@ -2536,7 +2535,7 @@ func TestExecuteWithoutUpdatingMT(t *testing.T) { require.NoError(t, err) // assert signed tx do deploy sc - assert.Equal(t, executorclientpb.RomError(1), processBatchResponse.Responses[0].Error) + assert.Equal(t, executor.RomError_ROM_ERROR_NO_ERROR, 
processBatchResponse.Responses[0].Error) assert.Equal(t, scAddress, common.HexToAddress(processBatchResponse.Responses[0].CreateAddress)) log.Debug(processBatchResponse) @@ -2578,7 +2577,7 @@ func TestExecuteWithoutUpdatingMT(t *testing.T) { require.NoError(t, err) // Create Batch 2 - processBatchRequest = &executorclientpb.ProcessBatchRequest{ + processBatchRequest = &executor.ProcessBatchRequest{ OldBatchNum: 1, Coinbase: sequencerAddress.String(), BatchL2Data: batchL2Data2, @@ -2597,10 +2596,10 @@ func TestExecuteWithoutUpdatingMT(t *testing.T) { log.Debug(processBatchResponse) // assert signed tx to increment counter - assert.Equal(t, executorclientpb.RomError(1), processBatchResponse.Responses[0].Error) + assert.Equal(t, executor.RomError_ROM_ERROR_NO_ERROR, processBatchResponse.Responses[0].Error) // assert signed tx to increment counter - assert.Equal(t, executorclientpb.RomError(1), processBatchResponse.Responses[1].Error) + assert.Equal(t, executor.RomError_ROM_ERROR_NO_ERROR, processBatchResponse.Responses[1].Error) assert.Equal(t, "0000000000000000000000000000000000000000000000000000000000000001", hex.EncodeToString(processBatchResponse.Responses[1].ReturnValue)) } diff --git a/state/transaction.go b/state/transaction.go index 633932e8f9..4f1ec2ffc8 100644 --- a/state/transaction.go +++ b/state/transaction.go @@ -14,7 +14,6 @@ import ( "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state/runtime" "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" - "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" "github.com/0xPolygonHermez/zkevm-node/state/runtime/fakevm" "github.com/0xPolygonHermez/zkevm-node/state/runtime/instrumentation" "github.com/0xPolygonHermez/zkevm-node/state/runtime/instrumentation/js" @@ -174,6 +173,9 @@ func (s *State) StoreTransactions(ctx context.Context, batchNumber uint64, proce transactions := []*types.Transaction{&processedTx.Tx} receipt := generateReceipt(header.Number, 
processedTx) + if !CheckLogOrder(receipt.Logs) { + return fmt.Errorf("error: logs received from executor are not in order") + } receipts := []*types.Receipt{receipt} // Create block to be able to calculate its hash @@ -241,7 +243,7 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has } // Create Batch - traceConfigRequest := &pb.TraceConfig{ + traceConfigRequest := &executor.TraceConfig{ TxHashToGenerateCallTrace: transactionHash.Bytes(), TxHashToGenerateExecuteTrace: transactionHash.Bytes(), // set the defaults to the maximum information we can have. @@ -270,7 +272,7 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has } oldStateRoot := previousBlock.Root() - processBatchRequest := &pb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ OldBatchNum: batch.BatchNumber - 1, OldStateRoot: oldStateRoot.Bytes(), OldAccInputHash: previousBatch.AccInputHash.Bytes(), @@ -291,7 +293,7 @@ func (s *State) DebugTransaction(ctx context.Context, transactionHash common.Has endTime := time.Now() if err != nil { return nil, err - } else if processBatchResponse.Error != executor.EXECUTOR_ERROR_NO_ERROR { + } else if processBatchResponse.Error != executor.ExecutorError_EXECUTOR_ERROR_NO_ERROR { err = executor.ExecutorErr(processBatchResponse.Error) s.eventLog.LogExecutorError(ctx, processBatchResponse.Error, processBatchRequest) return nil, err @@ -793,15 +795,16 @@ func (s *State) internalProcessUnsignedTransaction(ctx context.Context, tx *type } } - batchL2Data, err := EncodeUnsignedTransaction(*tx, s.cfg.ChainID, &nonce) + forkID := s.GetForkIDByBatchNumber(lastBatch.BatchNumber) + + batchL2Data, err := EncodeUnsignedTransaction(*tx, s.cfg.ChainID, &nonce, forkID) if err != nil { log.Errorf("error encoding unsigned transaction ", err) return nil, err } - forkID := s.GetForkIDByBatchNumber(lastBatch.BatchNumber) // Create Batch - processBatchRequest := &pb.ProcessBatchRequest{ + 
processBatchRequest := &executor.ProcessBatchRequest{ OldBatchNum: lastBatch.BatchNumber, BatchL2Data: batchL2Data, From: senderAddress.String(), @@ -854,13 +857,13 @@ func (s *State) internalProcessUnsignedTransaction(ctx context.Context, tx *type return nil, runtime.ErrGRPCResourceExhaustedAsTimeout } // Log this error as an executor unspecified error - s.eventLog.LogExecutorError(ctx, pb.ExecutorError_EXECUTOR_ERROR_UNSPECIFIED, processBatchRequest) + s.eventLog.LogExecutorError(ctx, executor.ExecutorError_EXECUTOR_ERROR_UNSPECIFIED, processBatchRequest) log.Errorf("error processing unsigned transaction ", err) return nil, err } } - if err == nil && processBatchResponse.Error != executor.EXECUTOR_ERROR_NO_ERROR { + if err == nil && processBatchResponse.Error != executor.ExecutorError_EXECUTOR_ERROR_NO_ERROR { err = executor.ExecutorErr(processBatchResponse.Error) s.eventLog.LogExecutorError(ctx, processBatchResponse.Error, processBatchRequest) return nil, err @@ -871,7 +874,7 @@ func (s *State) internalProcessUnsignedTransaction(ctx context.Context, tx *type return nil, err } - if processBatchResponse.Responses[0].Error != pb.RomError(executor.ROM_ERROR_NO_ERROR) { + if processBatchResponse.Responses[0].Error != executor.RomError_ROM_ERROR_NO_ERROR { err := executor.RomErr(processBatchResponse.Responses[0].Error) if !isEVMRevertError(err) { return response, err @@ -1036,15 +1039,16 @@ func (s *State) EstimateGas(transaction *types.Transaction, senderAddress common Data: transaction.Data(), }) - batchL2Data, err := EncodeUnsignedTransaction(*tx, s.cfg.ChainID, nil) + forkID := s.GetForkIDByBatchNumber(lastBatch.BatchNumber) + + batchL2Data, err := EncodeUnsignedTransaction(*tx, s.cfg.ChainID, nil, forkID) if err != nil { log.Errorf("error encoding unsigned transaction ", err) return false, false, gasUsed, nil, err } - forkID := s.GetForkIDByBatchNumber(lastBatch.BatchNumber) // Create a batch to be sent to the executor - processBatchRequest := 
&pb.ProcessBatchRequest{ + processBatchRequest := &executor.ProcessBatchRequest{ OldBatchNum: lastBatch.BatchNumber, BatchL2Data: batchL2Data, From: senderAddress.String(), @@ -1078,14 +1082,14 @@ func (s *State) EstimateGas(transaction *types.Transaction, senderAddress common return false, false, gasUsed, nil, err } gasUsed = processBatchResponse.Responses[0].GasUsed - if processBatchResponse.Error != executor.EXECUTOR_ERROR_NO_ERROR { + if processBatchResponse.Error != executor.ExecutorError_EXECUTOR_ERROR_NO_ERROR { err = executor.ExecutorErr(processBatchResponse.Error) s.eventLog.LogExecutorError(ctx, processBatchResponse.Error, processBatchRequest) return false, false, gasUsed, nil, err } // Check if an out of gas error happened during EVM execution - if processBatchResponse.Responses[0].Error != pb.RomError(executor.ROM_ERROR_NO_ERROR) { + if processBatchResponse.Responses[0].Error != executor.RomError_ROM_ERROR_NO_ERROR { err := executor.RomErr(processBatchResponse.Responses[0].Error) if (isGasEVMError(err) || isGasApplyError(err)) && shouldOmitErr { diff --git a/synchronizer/interfaces.go b/synchronizer/interfaces.go index 3c1d7f0c40..e40c11193e 100644 --- a/synchronizer/interfaces.go +++ b/synchronizer/interfaces.go @@ -8,7 +8,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/jsonrpc/types" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/state/metrics" - "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" + "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" "github.com/ethereum/go-ethereum/common" ethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/jackc/pgx/v4" @@ -40,14 +40,14 @@ type stateInterface interface { AddVirtualBatch(ctx context.Context, virtualBatch *state.VirtualBatch, dbTx pgx.Tx) error GetNextForcedBatches(ctx context.Context, nextForcedBatches int, dbTx pgx.Tx) ([]state.ForcedBatch, error) AddVerifiedBatch(ctx context.Context, verifiedBatch 
*state.VerifiedBatch, dbTx pgx.Tx) error - ProcessAndStoreClosedBatch(ctx context.Context, processingCtx state.ProcessingContext, encodedTxs []byte, dbTx pgx.Tx, caller metrics.CallerLabel) (common.Hash, error) + ProcessAndStoreClosedBatch(ctx context.Context, processingCtx state.ProcessingContext, encodedTxs []byte, dbTx pgx.Tx, caller metrics.CallerLabel) (common.Hash, uint64, string, error) SetGenesis(ctx context.Context, block state.Block, genesis state.Genesis, dbTx pgx.Tx) ([]byte, error) OpenBatch(ctx context.Context, processingContext state.ProcessingContext, dbTx pgx.Tx) error CloseBatch(ctx context.Context, receipt state.ProcessingReceipt, dbTx pgx.Tx) error ProcessBatch(ctx context.Context, request state.ProcessRequest, updateMerkleTree bool) (*state.ProcessBatchResponse, error) StoreTransaction(ctx context.Context, batchNumber uint64, processedTx *state.ProcessTransactionResponse, coinbase common.Address, timestamp uint64, dbTx pgx.Tx) error GetStateRootByBatchNumber(ctx context.Context, batchNum uint64, dbTx pgx.Tx) (common.Hash, error) - ExecuteBatch(ctx context.Context, batch state.Batch, updateMerkleTree bool, dbTx pgx.Tx) (*pb.ProcessBatchResponse, error) + ExecuteBatch(ctx context.Context, batch state.Batch, updateMerkleTree bool, dbTx pgx.Tx) (*executor.ProcessBatchResponse, error) GetLastVerifiedBatch(ctx context.Context, dbTx pgx.Tx) (*state.VerifiedBatch, error) GetLastVirtualBatchNum(ctx context.Context, dbTx pgx.Tx) (uint64, error) AddSequence(ctx context.Context, sequence state.Sequence, dbTx pgx.Tx) error @@ -62,6 +62,7 @@ type stateInterface interface { BeginStateTransaction(ctx context.Context) (pgx.Tx, error) UpdateBatchL2Data(ctx context.Context, batchNumber uint64, batchL2Data []byte, dbTx pgx.Tx) error GetForkIDByBatchNumber(batchNumber uint64) uint64 + GetStoredFlushID(ctx context.Context) (uint64, string, error) } type ethTxManager interface { diff --git a/synchronizer/mock_state.go b/synchronizer/mock_state.go index 
0040b92595..a65375c1d8 100644 --- a/synchronizer/mock_state.go +++ b/synchronizer/mock_state.go @@ -7,12 +7,12 @@ import ( common "github.com/ethereum/go-ethereum/common" + executor "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" + metrics "github.com/0xPolygonHermez/zkevm-node/state/metrics" mock "github.com/stretchr/testify/mock" - pb "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" - pgx "github.com/jackc/pgx/v4" state "github.com/0xPolygonHermez/zkevm-node/state" @@ -178,19 +178,19 @@ func (_m *stateMock) CloseBatch(ctx context.Context, receipt state.ProcessingRec } // ExecuteBatch provides a mock function with given fields: ctx, batch, updateMerkleTree, dbTx -func (_m *stateMock) ExecuteBatch(ctx context.Context, batch state.Batch, updateMerkleTree bool, dbTx pgx.Tx) (*pb.ProcessBatchResponse, error) { +func (_m *stateMock) ExecuteBatch(ctx context.Context, batch state.Batch, updateMerkleTree bool, dbTx pgx.Tx) (*executor.ProcessBatchResponse, error) { ret := _m.Called(ctx, batch, updateMerkleTree, dbTx) - var r0 *pb.ProcessBatchResponse + var r0 *executor.ProcessBatchResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, state.Batch, bool, pgx.Tx) (*pb.ProcessBatchResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, state.Batch, bool, pgx.Tx) (*executor.ProcessBatchResponse, error)); ok { return rf(ctx, batch, updateMerkleTree, dbTx) } - if rf, ok := ret.Get(0).(func(context.Context, state.Batch, bool, pgx.Tx) *pb.ProcessBatchResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, state.Batch, bool, pgx.Tx) *executor.ProcessBatchResponse); ok { r0 = rf(ctx, batch, updateMerkleTree, dbTx) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*pb.ProcessBatchResponse) + r0 = ret.Get(0).(*executor.ProcessBatchResponse) } } @@ -471,6 +471,37 @@ func (_m *stateMock) GetStateRootByBatchNumber(ctx context.Context, batchNum uin return r0, r1 } +// GetStoredFlushID provides a mock function with 
given fields: ctx +func (_m *stateMock) GetStoredFlushID(ctx context.Context) (uint64, string, error) { + ret := _m.Called(ctx) + + var r0 uint64 + var r1 string + var r2 error + if rf, ok := ret.Get(0).(func(context.Context) (uint64, string, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) uint64); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(uint64) + } + + if rf, ok := ret.Get(1).(func(context.Context) string); ok { + r1 = rf(ctx) + } else { + r1 = ret.Get(1).(string) + } + + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + // OpenBatch provides a mock function with given fields: ctx, processingContext, dbTx func (_m *stateMock) OpenBatch(ctx context.Context, processingContext state.ProcessingContext, dbTx pgx.Tx) error { ret := _m.Called(ctx, processingContext, dbTx) @@ -486,12 +517,14 @@ func (_m *stateMock) OpenBatch(ctx context.Context, processingContext state.Proc } // ProcessAndStoreClosedBatch provides a mock function with given fields: ctx, processingCtx, encodedTxs, dbTx, caller -func (_m *stateMock) ProcessAndStoreClosedBatch(ctx context.Context, processingCtx state.ProcessingContext, encodedTxs []byte, dbTx pgx.Tx, caller metrics.CallerLabel) (common.Hash, error) { +func (_m *stateMock) ProcessAndStoreClosedBatch(ctx context.Context, processingCtx state.ProcessingContext, encodedTxs []byte, dbTx pgx.Tx, caller metrics.CallerLabel) (common.Hash, uint64, string, error) { ret := _m.Called(ctx, processingCtx, encodedTxs, dbTx, caller) var r0 common.Hash - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, state.ProcessingContext, []byte, pgx.Tx, metrics.CallerLabel) (common.Hash, error)); ok { + var r1 uint64 + var r2 string + var r3 error + if rf, ok := ret.Get(0).(func(context.Context, state.ProcessingContext, []byte, pgx.Tx, metrics.CallerLabel) (common.Hash, uint64, string, error)); ok { return rf(ctx, 
processingCtx, encodedTxs, dbTx, caller) } if rf, ok := ret.Get(0).(func(context.Context, state.ProcessingContext, []byte, pgx.Tx, metrics.CallerLabel) common.Hash); ok { @@ -502,13 +535,25 @@ func (_m *stateMock) ProcessAndStoreClosedBatch(ctx context.Context, processingC } } - if rf, ok := ret.Get(1).(func(context.Context, state.ProcessingContext, []byte, pgx.Tx, metrics.CallerLabel) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, state.ProcessingContext, []byte, pgx.Tx, metrics.CallerLabel) uint64); ok { r1 = rf(ctx, processingCtx, encodedTxs, dbTx, caller) } else { - r1 = ret.Error(1) + r1 = ret.Get(1).(uint64) } - return r0, r1 + if rf, ok := ret.Get(2).(func(context.Context, state.ProcessingContext, []byte, pgx.Tx, metrics.CallerLabel) string); ok { + r2 = rf(ctx, processingCtx, encodedTxs, dbTx, caller) + } else { + r2 = ret.Get(2).(string) + } + + if rf, ok := ret.Get(3).(func(context.Context, state.ProcessingContext, []byte, pgx.Tx, metrics.CallerLabel) error); ok { + r3 = rf(ctx, processingCtx, encodedTxs, dbTx, caller) + } else { + r3 = ret.Error(3) + } + + return r0, r1, r2, r3 } // ProcessBatch provides a mock function with given fields: ctx, request, updateMerkleTree @@ -666,13 +711,12 @@ func (_m *stateMock) UpdateForkIDIntervals(intervals []state.ForkIDInterval) { _m.Called(intervals) } -type mockConstructorTestingTnewStateMock interface { +// newStateMock creates a new instance of stateMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func newStateMock(t interface { mock.TestingT Cleanup(func()) -} - -// newStateMock creates a new instance of stateMock. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func newStateMock(t mockConstructorTestingTnewStateMock) *stateMock { +}) *stateMock { mock := &stateMock{} mock.Mock.Test(t) diff --git a/synchronizer/synchronizer.go b/synchronizer/synchronizer.go index 09854dad71..7695799027 100644 --- a/synchronizer/synchronizer.go +++ b/synchronizer/synchronizer.go @@ -9,6 +9,7 @@ import ( "time" "github.com/0xPolygonHermez/zkevm-node/etherman" + "github.com/0xPolygonHermez/zkevm-node/event" "github.com/0xPolygonHermez/zkevm-node/hex" "github.com/0xPolygonHermez/zkevm-node/jsonrpc/types" "github.com/0xPolygonHermez/zkevm-node/log" @@ -33,19 +34,30 @@ type Synchronizer interface { // ClientSynchronizer connects L1 and L2 type ClientSynchronizer struct { isTrustedSequencer bool - etherMan ethermanInterface - state stateInterface - pool poolInterface - ethTxManager ethTxManager - zkEVMClient zkEVMClientInterface - ctx context.Context - cancelCtx context.CancelFunc - genesis state.Genesis - cfg Config - trustedState struct { + latestFlushID uint64 + // If true the lastFlushID is stored in DB and we don't need to check again + latestFlushIDIsFulfilled bool + etherMan ethermanInterface + state stateInterface + pool poolInterface + ethTxManager ethTxManager + zkEVMClient zkEVMClientInterface + eventLog *event.EventLog + ctx context.Context + cancelCtx context.CancelFunc + genesis state.Genesis + cfg Config + trustedState struct { lastTrustedBatches []*state.Batch lastStateRoot *common.Hash } + // Id of the 'process' of the executor. 
Each time that it starts this value changes + // This value is obtained from the call state.GetStoredFlushID + // It starts as an empty string and it is filled in the first call + // later the value is checked to be the same (in function checkFlushID) + proverID string + // Previous value returned by state.GetStoredFlushID, is used for decide if write a log or not + previousExecutorFlushID uint64 } // NewSynchronizer creates and initializes an instance of Synchronizer @@ -56,22 +68,26 @@ func NewSynchronizer( pool poolInterface, ethTxManager ethTxManager, zkEVMClient zkEVMClientInterface, + eventLog *event.EventLog, genesis state.Genesis, cfg Config) (Synchronizer, error) { ctx, cancel := context.WithCancel(context.Background()) metrics.Register() return &ClientSynchronizer{ - isTrustedSequencer: isTrustedSequencer, - state: st, - etherMan: ethMan, - pool: pool, - ctx: ctx, - cancelCtx: cancel, - ethTxManager: ethTxManager, - zkEVMClient: zkEVMClient, - genesis: genesis, - cfg: cfg, + isTrustedSequencer: isTrustedSequencer, + state: st, + etherMan: ethMan, + pool: pool, + ctx: ctx, + cancelCtx: cancel, + ethTxManager: ethTxManager, + zkEVMClient: zkEVMClient, + eventLog: eventLog, + genesis: genesis, + cfg: cfg, + proverID: "", + previousExecutorFlushID: 0, }, nil } @@ -221,8 +237,8 @@ func (s *ClientSynchronizer) Sync() error { // Sync trusted state if latestSyncedBatch >= latestSequencedBatchNumber { - log.Info("L1 state fully synchronized") startTrusted := time.Now() + log.Info("Syncing trusted state") err = s.syncTrustedState(latestSyncedBatch) metrics.FullTrustedSyncTime(time.Since(startTrusted)) if err != nil { @@ -246,6 +262,7 @@ func (s *ClientSynchronizer) Sync() error { } } metrics.FullSyncIterationTime(time.Since(start)) + log.Info("L1 state fully synchronized") } } } @@ -401,6 +418,17 @@ func (s *ClientSynchronizer) syncTrustedState(latestSyncedBatch uint64) error { } return err } + log.Debug("Checking FlushID to commit trustedState data to db") + err = 
s.checkFlushID(dbTx) + if err != nil { + log.Errorf("error checking flushID. Error: %v", err) + rollbackErr := dbTx.Rollback(s.ctx) + if rollbackErr != nil { + log.Errorf("error rolling back state. RollbackErr: %s, Error : %v", rollbackErr.Error(), err) + return rollbackErr + } + return err + } if err := dbTx.Commit(s.ctx); err != nil { log.Errorf("error committing db transaction to sync trusted batch %v: %v", batchNumberToSync, err) @@ -475,6 +503,17 @@ func (s *ClientSynchronizer) processBlockRange(blocks []etherman.Block, order ma } } } + log.Debug("Checking FlushID to commit L1 data to db") + err = s.checkFlushID(dbTx) + if err != nil { + log.Errorf("error checking flushID. Error: %v", err) + rollbackErr := dbTx.Rollback(s.ctx) + if rollbackErr != nil { + log.Errorf("error rolling back state. RollbackErr: %s, Error : %v", rollbackErr.Error(), err) + return rollbackErr + } + return err + } err = dbTx.Commit(s.ctx) if err != nil { log.Errorf("error committing state to store block. BlockNumber: %d, err: %v", blocks[i].BlockNumber, err) @@ -814,7 +853,7 @@ func (s *ClientSynchronizer) processSequenceBatches(sequencedBatches []etherman. if errors.Is(err, state.ErrNotFound) || errors.Is(err, state.ErrStateNotSynchronized) { log.Debugf("BatchNumber: %d, not found in trusted state. Storing it...", batch.BatchNumber) // If it is not found, store batch - newStateRoot, err := s.state.ProcessAndStoreClosedBatch(s.ctx, processCtx, batch.BatchL2Data, dbTx, stateMetrics.SynchronizerCallerLabel) + newStateRoot, flushID, proverID, err := s.state.ProcessAndStoreClosedBatch(s.ctx, processCtx, batch.BatchL2Data, dbTx, stateMetrics.SynchronizerCallerLabel) if err != nil { log.Errorf("error storing trustedBatch. BatchNumber: %d, BlockNumber: %d, error: %v", batch.BatchNumber, blockNumber, err) rollbackErr := dbTx.Rollback(s.ctx) @@ -825,6 +864,8 @@ func (s *ClientSynchronizer) processSequenceBatches(sequencedBatches []etherman. log.Errorf("error storing batch. 
BatchNumber: %d, BlockNumber: %d, error: %v", batch.BatchNumber, blockNumber, err) return err } + s.pendingFlushID(flushID, proverID) + newRoot = newStateRoot tBatch = &batch tBatch.StateRoot = newRoot @@ -894,7 +935,7 @@ func (s *ClientSynchronizer) processSequenceBatches(sequencedBatches []etherman. log.Errorf("error resetting trusted state. BatchNumber: %d, BlockNumber: %d, error: %v", batch.BatchNumber, blockNumber, err) return err } - _, err = s.state.ProcessAndStoreClosedBatch(s.ctx, processCtx, batch.BatchL2Data, dbTx, stateMetrics.SynchronizerCallerLabel) + _, flushID, proverID, err := s.state.ProcessAndStoreClosedBatch(s.ctx, processCtx, batch.BatchL2Data, dbTx, stateMetrics.SynchronizerCallerLabel) if err != nil { log.Errorf("error storing trustedBatch. BatchNumber: %d, BlockNumber: %d, error: %v", batch.BatchNumber, blockNumber, err) rollbackErr := dbTx.Rollback(s.ctx) @@ -905,6 +946,7 @@ func (s *ClientSynchronizer) processSequenceBatches(sequencedBatches []etherman. log.Errorf("error storing batch. BatchNumber: %d, BlockNumber: %d, error: %v", batch.BatchNumber, blockNumber, err) return err } + s.pendingFlushID(flushID, proverID) } // Store virtualBatch @@ -1018,7 +1060,7 @@ func (s *ClientSynchronizer) processSequenceForceBatch(sequenceForceBatch []ethe ForcedBatchNum: &forcedBatches[i].ForcedBatchNumber, } // Process batch - _, err := s.state.ProcessAndStoreClosedBatch(s.ctx, batch, forcedBatches[i].RawTxsData, dbTx, stateMetrics.SynchronizerCallerLabel) + _, flushID, proverID, err := s.state.ProcessAndStoreClosedBatch(s.ctx, batch, forcedBatches[i].RawTxsData, dbTx, stateMetrics.SynchronizerCallerLabel) if err != nil { log.Errorf("error processing batch in processSequenceForceBatch. 
BatchNumber: %d, BlockNumber: %d, error: %v", batch.BatchNumber, block.BlockNumber, err) rollbackErr := dbTx.Rollback(s.ctx) @@ -1029,6 +1071,8 @@ func (s *ClientSynchronizer) processSequenceForceBatch(sequenceForceBatch []ethe log.Errorf("error processing batch in processSequenceForceBatch. BatchNumber: %d, BlockNumber: %d, error: %v", batch.BatchNumber, block.BlockNumber, err) return err } + s.pendingFlushID(flushID, proverID) + // Store virtualBatch err = s.state.AddVirtualBatch(s.ctx, &virtualBatch, dbTx) if err != nil { @@ -1122,13 +1166,13 @@ func (s *ClientSynchronizer) processTrustedVerifyBatches(lastVerifiedBatch ether nbatches := lastVerifiedBatch.BatchNumber - lastVBatch.BatchNumber batch, err := s.state.GetBatchByNumber(s.ctx, lastVerifiedBatch.BatchNumber, dbTx) if err != nil { - log.Errorf("error getting GetBatchByNumber stored in db in processTrustedVerifyBatches. Processing blockNumber: %d", lastVerifiedBatch.BatchNumber) + log.Errorf("error getting GetBatchByNumber stored in db in processTrustedVerifyBatches. Processing batchNumber: %d", lastVerifiedBatch.BatchNumber) rollbackErr := dbTx.Rollback(s.ctx) if rollbackErr != nil { - log.Errorf("error rolling back state. Processing blockNumber: %d, rollbackErr: %s, error : %v", lastVerifiedBatch.BatchNumber, rollbackErr.Error(), err) + log.Errorf("error rolling back state. Processing batchNumber: %d, rollbackErr: %s, error : %v", lastVerifiedBatch.BatchNumber, rollbackErr.Error(), err) return rollbackErr } - log.Errorf("error getting GetBatchByNumber stored in db in processTrustedVerifyBatches. Processing blockNumber: %d, error: %v", lastVerifiedBatch.BatchNumber, err) + log.Errorf("error getting GetBatchByNumber stored in db in processTrustedVerifyBatches. 
Processing batchNumber: %d, error: %v", lastVerifiedBatch.BatchNumber, err) return err } @@ -1137,14 +1181,14 @@ func (s *ClientSynchronizer) processTrustedVerifyBatches(lastVerifiedBatch ether log.Warn("nbatches: ", nbatches) log.Warnf("Batch from db: %+v", batch) log.Warnf("Verified Batch: %+v", lastVerifiedBatch) - log.Errorf("error: stateRoot calculated and state root verified don't match in processTrustedVerifyBatches. Processing blockNumber: %d", lastVerifiedBatch.BatchNumber) + log.Errorf("error: stateRoot calculated and state root verified don't match in processTrustedVerifyBatches. Processing batchNumber: %d", lastVerifiedBatch.BatchNumber) rollbackErr := dbTx.Rollback(s.ctx) if rollbackErr != nil { - log.Errorf("error rolling back state. Processing blockNumber: %d, rollbackErr: %v", lastVerifiedBatch.BatchNumber, rollbackErr) + log.Errorf("error rolling back state. Processing batchNumber: %d, rollbackErr: %v", lastVerifiedBatch.BatchNumber, rollbackErr) return rollbackErr } - log.Errorf("error: stateRoot calculated and state root verified don't match in processTrustedVerifyBatches. Processing blockNumber: %d", lastVerifiedBatch.BatchNumber) - return fmt.Errorf("error: stateRoot calculated and state root verified don't match in processTrustedVerifyBatches. Processing blockNumber: %d", lastVerifiedBatch.BatchNumber) + log.Errorf("error: stateRoot calculated and state root verified don't match in processTrustedVerifyBatches. Processing batchNumber: %d", lastVerifiedBatch.BatchNumber) + return fmt.Errorf("error: stateRoot calculated and state root verified don't match in processTrustedVerifyBatches. 
Processing batchNumber: %d", lastVerifiedBatch.BatchNumber) } var i uint64 for i = 1; i <= nbatches; i++ { @@ -1244,6 +1288,9 @@ func (s *ClientSynchronizer) processTrustedBatch(trustedBatch *types.Batch, dbTx log.Errorf("error closing batch %d", trustedBatch.Number) return nil, nil, err } + batches[0].AccInputHash = trustedBatch.AccInputHash + batches[0].StateRoot = trustedBatch.StateRoot + batches[0].LocalExitRoot = trustedBatch.LocalExitRoot } return batches, &trustedBatch.StateRoot, nil } @@ -1339,6 +1386,7 @@ func (s *ClientSynchronizer) processAndStoreTxs(trustedBatch *types.Batch, reque log.Errorf("error processing sequencer batch for batch: %v", trustedBatch.Number) return nil, err } + s.pendingFlushID(processBatchResp.FlushID, processBatchResp.ProverID) log.Debugf("Storing transactions %d for batch %v", len(processBatchResp.Responses), trustedBatch.Number) for _, tx := range processBatchResp.Responses { @@ -1400,6 +1448,13 @@ func checkIfSynced(batches []*state.Batch, trustedBatch *types.Batch) bool { matchCoinbase && matchTimestamp && matchL2Data { return true } + log.Info("matchNumber", matchNumber) + log.Info("matchGER", matchGER) + log.Info("matchLER", matchLER) + log.Info("matchSR", matchSR) + log.Info("matchCoinbase", matchCoinbase) + log.Info("matchTimestamp", matchTimestamp) + log.Info("matchL2Data", matchL2Data) return false } @@ -1428,3 +1483,77 @@ func (s *ClientSynchronizer) getCurrentBatches(batches []*state.Batch, trustedBa } return batches, nil } + +func (s *ClientSynchronizer) pendingFlushID(flushID uint64, proverID string) { + log.Infof("pending flushID: %d", flushID) + s.latestFlushID = flushID + s.latestFlushIDIsFulfilled = false + s.updateAndCheckProverID(proverID) +} + +func (s *ClientSynchronizer) updateAndCheckProverID(proverID string) { + if s.proverID == "" { + log.Infof("Current proverID is %s", proverID) + s.proverID = proverID + return + } + if s.proverID != proverID { + event := &event.Event{ + ReceivedAt: time.Now(), + Source: 
event.Source_Node, + Component: event.Component_Synchronizer, + Level: event.Level_Critical, + EventID: event.EventID_SynchonizerRestart, + Description: fmt.Sprintf("proverID changed from %s to %s, restarting Synchonizer ", s.proverID, proverID), + } + + err := s.eventLog.LogEvent(context.Background(), event) + if err != nil { + log.Errorf("error storing event payload: %v", err) + } + + log.Fatal("restarting synchronizer because executor have restarted (old=%s, new=%s)", s.proverID, proverID) + } +} + +func (s *ClientSynchronizer) checkFlushID(dbTx pgx.Tx) error { + if s.latestFlushIDIsFulfilled { + log.Debugf("no pending flushID, nothing to do. Last pending fulfilled flushID: %d, last executor flushId received: %d", s.latestFlushID, s.latestFlushID) + return nil + } + storedFlushID, proverID, err := s.state.GetStoredFlushID(s.ctx) + if err != nil { + log.Error("error getting stored flushID. Error: ", err) + return err + } + if (s.previousExecutorFlushID != storedFlushID) || (s.proverID != proverID) { + log.Infof("executor vs local: flushid=%d/%d, proverID=%s/%s", storedFlushID, + s.latestFlushID, proverID, s.proverID) + } else { + log.Debugf("executor vs local: flushid=%d/%d, proverID=%s/%s", storedFlushID, + s.latestFlushID, proverID, s.proverID) + } + s.updateAndCheckProverID(proverID) + log.Debugf("storedFlushID (executor reported): %d, latestFlushID (pending): %d", storedFlushID, s.latestFlushID) + if storedFlushID < s.latestFlushID { + log.Infof("Synchornized BLOCKED!: Wating for the flushID to be stored. FlushID to be stored: %d. Latest flushID stored: %d", s.latestFlushID, storedFlushID) + iteration := 0 + start := time.Now() + for storedFlushID < s.latestFlushID { + log.Debugf("Waiting for the flushID to be stored. FlushID to be stored: %d. 
Latest flushID stored: %d iteration:%d elpased:%s", + s.latestFlushID, storedFlushID, iteration, time.Since(start)) + time.Sleep(100 * time.Millisecond) //nolint:gomnd + storedFlushID, _, err = s.state.GetStoredFlushID(s.ctx) + if err != nil { + log.Error("error getting stored flushID. Error: ", err) + return err + } + iteration++ + } + log.Infof("Synchornizer resumed, flushID stored: %d", s.latestFlushID) + } + log.Infof("Pending Flushid fullfiled: %d, executor have write %d", s.latestFlushID, storedFlushID) + s.latestFlushIDIsFulfilled = true + s.previousExecutorFlushID = storedFlushID + return nil +} diff --git a/synchronizer/synchronizer_test.go b/synchronizer/synchronizer_test.go index 4660f9017c..d8199eea79 100644 --- a/synchronizer/synchronizer_test.go +++ b/synchronizer/synchronizer_test.go @@ -12,7 +12,7 @@ import ( "github.com/0xPolygonHermez/zkevm-node/jsonrpc/types" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/state/metrics" - "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" + "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" "github.com/ethereum/go-ethereum/common" ethTypes "github.com/ethereum/go-ethereum/core/types" "github.com/jackc/pgx/v4" @@ -21,6 +21,10 @@ import ( "github.com/stretchr/testify/require" ) +const ( + cProverIDExecution = "PROVER_ID-EXE001" +) + type mocks struct { Etherman *ethermanMock State *stateMock @@ -28,14 +32,18 @@ type mocks struct { EthTxManager *ethTxManagerMock DbTx *dbTxMock ZKEVMClient *zkEVMClientMock + //EventLog *eventLogMock } +//func Test_Given_StartingSynchronizer_When_CallFirstTimeExecutor_Then_StoreProverID(t *testing.T) { +//} + // Feature #2220 and #2239: Optimize Trusted state synchronization // // this Check partially point 2: Use previous batch stored in memory to avoid getting from database func Test_Given_PermissionlessNode_When_SyncronizeAgainSameBatch_Then_UseTheOneInMemoryInstaeadOfGettingFromDb(t *testing.T) { genesis, cfg, m := 
setupGenericTest(t) - sync_interface, err := NewSynchronizer(false, m.Etherman, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, *genesis, *cfg) + sync_interface, err := NewSynchronizer(false, m.Etherman, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, *genesis, *cfg) require.NoError(t, err) sync, ok := sync_interface.(*ClientSynchronizer) require.EqualValues(t, true, ok, "Can't convert to underlaying struct the interface of syncronizer") @@ -58,7 +66,7 @@ func Test_Given_PermissionlessNode_When_SyncronizeAgainSameBatch_Then_UseTheOneI // this Check partially point 2: Store last batch in memory (CurrentTrustedBatch) func Test_Given_PermissionlessNode_When_SyncronizeFirstTimeABatch_Then_StoreItInALocalVar(t *testing.T) { genesis, cfg, m := setupGenericTest(t) - sync_interface, err := NewSynchronizer(false, m.Etherman, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, *genesis, *cfg) + sync_interface, err := NewSynchronizer(false, m.Etherman, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, *genesis, *cfg) require.NoError(t, err) sync, ok := sync_interface.(*ClientSynchronizer) require.EqualValues(t, true, ok, "Can't convert to underlaying struct the interface of syncronizer") @@ -91,7 +99,7 @@ func TestForcedBatch(t *testing.T) { ZKEVMClient: newZkEVMClientMock(t), } - sync, err := NewSynchronizer(false, m.Etherman, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, genesis, cfg) + sync, err := NewSynchronizer(false, m.Etherman, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, genesis, cfg) require.NoError(t, err) // state preparation @@ -270,10 +278,9 @@ func TestForcedBatch(t *testing.T) { GlobalExitRoot: [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}, ForcedBatchNum: &forced, } - m.State. //ExecuteBatch(s.ctx, batch.BatchNumber, batch.BatchL2Data, dbTx - On("ExecuteBatch", ctx, sbatch, false, m.DbTx). 
- Return(&pb.ProcessBatchResponse{NewStateRoot: trustedBatch.StateRoot.Bytes()}, nil). - Once() + m.State.On("ExecuteBatch", ctx, sbatch, false, m.DbTx). + Return(&executor.ProcessBatchResponse{NewStateRoot: trustedBatch.StateRoot.Bytes()}, nil). + Once() virtualBatch := &state.VirtualBatch{ BatchNumber: sequencedBatch.BatchNumber, @@ -301,6 +308,11 @@ func TestForcedBatch(t *testing.T) { Return(nil). Once() + m.State. + On("GetStoredFlushID", ctx). + Return(uint64(1), cProverIDExecution, nil). + Once() + m.DbTx. On("Commit", ctx). Run(func(args mock.Arguments) { sync.Stop() }). @@ -331,7 +343,7 @@ func TestSequenceForcedBatch(t *testing.T) { ZKEVMClient: newZkEVMClientMock(t), } - sync, err := NewSynchronizer(true, m.Etherman, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, genesis, cfg) + sync, err := NewSynchronizer(true, m.Etherman, m.State, m.Pool, m.EthTxManager, m.ZKEVMClient, nil, genesis, cfg) require.NoError(t, err) // state preparation @@ -501,7 +513,7 @@ func TestSequenceForcedBatch(t *testing.T) { m.State. On("ProcessAndStoreClosedBatch", ctx, processingContext, sequencedForceBatch.Transactions, m.DbTx, metrics.SynchronizerCallerLabel). - Return(common.Hash{}, nil). + Return(common.Hash{}, uint64(1), cProverIDExecution, nil). Once() virtualBatch := &state.VirtualBatch{ @@ -526,6 +538,11 @@ func TestSequenceForcedBatch(t *testing.T) { Return(nil). Once() + m.State. + On("GetStoredFlushID", ctx). + Return(uint64(1), cProverIDExecution, nil). + Once() + m.DbTx. On("Commit", ctx). Run(func(args mock.Arguments) { sync.Stop() }). @@ -554,6 +571,7 @@ func setupGenericTest(t *testing.T) (*state.Genesis, *Config, *mocks) { Pool: newPoolMock(t), DbTx: newDbTxMock(t), ZKEVMClient: newZkEVMClientMock(t), + //EventLog: newEventLogMock(t), } return &genesis, &cfg, &m } @@ -651,7 +669,8 @@ func expectedCallsForsyncTrustedState(t *testing.T, m *mocks, sync *ClientSynchr Return(batchInTrustedNode, nil). Once() - m.State.On("BeginStateTransaction", sync.ctx). + m.State. 
+ On("BeginStateTransaction", sync.ctx). Return(m.DbTx, nil). Once() @@ -674,6 +693,8 @@ func expectedCallsForsyncTrustedState(t *testing.T, m *mocks, sync *ClientSynchr tx1 := state.ProcessTransactionResponse{} processedBatch := state.ProcessBatchResponse{ + FlushID: 1, + ProverID: cProverIDExecution, Responses: []*state.ProcessTransactionResponse{&tx1}, } m.State. @@ -686,5 +707,13 @@ func expectedCallsForsyncTrustedState(t *testing.T, m *mocks, sync *ClientSynchr Return(nil). Once() - m.DbTx.On("Commit", sync.ctx).Return(nil).Once() + m.State. + On("GetStoredFlushID", sync.ctx). + Return(uint64(1), cProverIDExecution, nil). + Once() + + m.DbTx. + On("Commit", sync.ctx). + Return(nil). + Once() } diff --git a/test/config/debug.node.config.toml b/test/config/debug.node.config.toml index 81bcc812bb..1fb57cb65b 100644 --- a/test/config/debug.node.config.toml +++ b/test/config/debug.node.config.toml @@ -31,11 +31,6 @@ PollMinAllowedGasPriceInterval = "15s" Port = "5433" EnableLog = false MaxConns = 10 - [Pool.EffectiveGasPrice] - L1GasPriceFactor = 0.25 - ByteGasCost = 16 - MarginFactor = 1 - [Etherman] URL = "http://localhost:8545" @@ -96,6 +91,7 @@ MaxTxLifetime = "3h" ClosingSignalsManagerWaitForCheckingForcedBatches = "10s" ForcedBatchesFinalityNumberOfBlocks = 0 TimestampResolution = "10s" + StopSequencerOnBatchNum = 0 [Sequencer.DBManager] PoolRetrievalInterval = "500ms" L2ReorgRetrievalInterval = "5s" @@ -103,6 +99,9 @@ MaxTxLifetime = "3h" ResourceCostMultiplier = 1000 [Sequencer.EffectiveGasPrice] MaxBreakEvenGasPriceDeviationPercentage = 10 + L1GasPriceFactor = 0.25 + ByteGasCost = 16 + MarginFactor = 1 Enabled = false [SequenceSender] @@ -134,6 +133,7 @@ PrivateKeys = [ [L2GasPriceSuggester] Type = "default" DefaultGasPriceWei = 1000000000 +MaxGasPriceWei = 0 [MTClient] URI = "127.0.0.1:50061" diff --git a/test/config/test.node.config.toml b/test/config/test.node.config.toml index f602079827..d5133f7b47 100644 --- a/test/config/test.node.config.toml +++ 
b/test/config/test.node.config.toml @@ -31,10 +31,6 @@ PollMinAllowedGasPriceInterval = "15s" Port = "5432" EnableLog = false MaxConns = 200 - [Pool.EffectiveGasPrice] - L1GasPriceFactor = 0.25 - ByteGasCost = 16 - MarginFactor = 1 [Etherman] URL = "http://zkevm-mock-l1-network:8545" @@ -96,6 +92,7 @@ MaxTxLifetime = "3h" ClosingSignalsManagerWaitForCheckingForcedBatches = "10s" ForcedBatchesFinalityNumberOfBlocks = 0 TimestampResolution = "10s" + StopSequencerOnBatchNum = 0 [Sequencer.DBManager] PoolRetrievalInterval = "500ms" L2ReorgRetrievalInterval = "5s" @@ -103,6 +100,9 @@ MaxTxLifetime = "3h" ResourceCostMultiplier = 1000 [Sequencer.EffectiveGasPrice] MaxBreakEvenGasPriceDeviationPercentage = 10 + L1GasPriceFactor = 0.25 + ByteGasCost = 16 + MarginFactor = 1 Enabled = false [SequenceSender] @@ -136,6 +136,7 @@ Type = "default" UpdatePeriod = "10s" Factor = 0.5 DefaultGasPriceWei = 1000000000 +MaxGasPriceWei = 0 [MTClient] URI = "zkevm-prover:50061" diff --git a/test/config/test.permissionless.prover.config.json b/test/config/test.permissionless.prover.config.json index 9dfe74dd87..24d4fbc67f 100644 --- a/test/config/test.permissionless.prover.config.json +++ b/test/config/test.permissionless.prover.config.json @@ -74,12 +74,16 @@ "dbProgramTableName": "state.program", "dbMultiWrite": true, "dbFlushInParallel": false, - "dbMTCacheSize": 2048, - "dbProgramCacheSize": 1024, + "dbMTCacheSize": 1024, + "dbProgramCacheSize": 512, "cleanerPollingPeriod": 600, "requestsPersistence": 3600, "maxExecutorThreads": 20, "maxProverThreads": 8, - "maxHashDBThreads": 8 + "maxHashDBThreads": 8, + + "ECRecoverPrecalc": true, + "ECRecoverPrecalcNThreads": 16, + "dbMultiWriteSinglePosition": true } diff --git a/test/config/test.prover.config.json b/test/config/test.prover.config.json index c094448fab..79d369b535 100644 --- a/test/config/test.prover.config.json +++ b/test/config/test.prover.config.json @@ -74,14 +74,18 @@ "dbProgramTableName": "state.program", "dbMultiWrite": 
true, "dbFlushInParallel": false, - "dbMTCacheSize": 2048, - "dbProgramCacheSize": 1024, + "dbMTCacheSize": 1024, + "dbProgramCacheSize": 512, "dbNumberOfPoolConnections": 30, "dbGetTree": true, "cleanerPollingPeriod": 600, "requestsPersistence": 3600, "maxExecutorThreads": 20, "maxProverThreads": 8, - "maxHashDBThreads": 8 + "maxHashDBThreads": 8, + + "ECRecoverPrecalc": true, + "ECRecoverPrecalcNThreads": 16, + "dbMultiWriteSinglePosition": true } diff --git a/test/docker-compose.yml b/test/docker-compose.yml index c8681f55a4..3abb33ed0c 100644 --- a/test/docker-compose.yml +++ b/test/docker-compose.yml @@ -338,7 +338,7 @@ services: zkevm-prover: container_name: zkevm-prover - image: hermeznetwork/zkevm-prover:v0.2.0-RC3 + image: hermeznetwork/zkevm-prover:v2.0.1 ports: # - 50051:50051 # Prover - 50052:50052 # Mock prover @@ -425,7 +425,7 @@ services: zkevm-permissionless-prover: container_name: zkevm-permissionless-prover - image: hermeznetwork/zkevm-prover:v0.2.0-RC3 + image: hermeznetwork/zkevm-prover:v2.0.1 ports: # - 50058:50058 # Prover - 50059:50052 # Mock prover @@ -447,3 +447,17 @@ services: - --web.enable-lifecycle volumes: - ../config/metrics/prometheus:/etc/prometheus + + zkevm-sh: + container_name: zkevm-sh + image: zkevm-node + stdin_open: true + tty: true + environment: + - ZKEVM_NODE_STATEDB_HOST=zkevm-state-db + - ZKEVM_NODE_POOL_DB_HOST=zkevm-pool-db + volumes: + - ./config/test.node.config.toml:/app/config.toml + - ./config/test.genesis.config.json:/app/genesis.json + command: + - "/bin/sh" diff --git a/tools/executor/main.go b/tools/executor/main.go index 2e9404249a..8f28ae12f1 100644 --- a/tools/executor/main.go +++ b/tools/executor/main.go @@ -13,7 +13,6 @@ import ( "github.com/0xPolygonHermez/zkevm-node/log" "github.com/0xPolygonHermez/zkevm-node/state" "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor" - "github.com/0xPolygonHermez/zkevm-node/state/runtime/executor/pb" "github.com/0xPolygonHermez/zkevm-node/test/testutils" ) @@ 
-112,7 +111,7 @@ func runTestCase(ctx context.Context, genesis []genesisItem, tc testCase) error xecutor, _, _ := executor.NewExecutorClient(ctx, executor.Config{URI: executorURL, MaxGRPCMessageSize: 100000000}) //nolint:gomnd // Execute batches for i := 0; i < len(tc.Requests); i++ { - pbr := pb.ProcessBatchRequest(tc.Requests[i]) //nolint + pbr := executor.ProcessBatchRequest(tc.Requests[i]) //nolint res, err := xecutor.ProcessBatch(ctx, &pbr) if err != nil { return err @@ -230,7 +229,7 @@ type testCase struct { Requests []executorRequest `json:"batches"` } -type executorRequest pb.ProcessBatchRequest +type executorRequest executor.ProcessBatchRequest func (er *executorRequest) UnmarshalJSON(data []byte) error { type jExecutorRequeststruct struct { @@ -263,7 +262,7 @@ func (er *executorRequest) UnmarshalJSON(data []byte) error { return err } - req := pb.ProcessBatchRequest{ + req := executor.ProcessBatchRequest{ BatchL2Data: batchL2Data, GlobalExitRoot: globalExitRoot, OldBatchNum: jer.OldBatchNum,