diff --git a/tools/block-generator/Makefile b/tools/block-generator/Makefile index fdb5754210..8134a2e517 100644 --- a/tools/block-generator/Makefile +++ b/tools/block-generator/Makefile @@ -1,9 +1,19 @@ -SCENARIO = scenarios/config.allmixed.small.yml -SKIP = --skip-runner +# The following variables are primarily useful for tuning dev testing and +# appear in the targets pg-up, pg-enter, pg-down, pg-query, +# run-runner, run-file-exporter, and benchmark-blocks-export: +SCENARIO = scenarios/benchmarks/stress.50000.yml RESETDB = --reset-db +TIMES = 1 REPORTS = ../../tmp/RUN_RUNNER_OUTPUTS DURATION = 30s VERBOSE = --verbose +CONDUIT = ./conduit +TEMPLATE = # --template file-exporter (default postgres-exporter) +PGUSER = algorand +PGDB = generator_db +PGCONT = "generator-test-container" +PGCONN = "host=localhost user=$(PGUSER) password=algorand dbname=$(PGDB) port=15432 sslmode=disable" +PGRUNNER = --postgres-connection-string $(PGCONN) block-generator: clean-generator go build @@ -11,31 +21,38 @@ block-generator: clean-generator clean-generator: rm -f block-generator -debug-blockgen: - python scripts/run_runner.py \ - --conduit-binary ./conduit \ - --scenario $(SCENARIO) \ - --report-directory $(REPORTS) \ - --keep-alive $(SKIP) \ - --test-duration $(DURATION) \ - $(RESETDB) +pg-up: + docker run --name $(PGCONT) -p 15432:5432 -e POSTGRES_USER=$(PGUSER) -e POSTGRES_PASSWORD=algorand -e POSTGRES_DB=$(PGDB) -d postgres + +pg-enter: + docker exec -it $(PGCONT) psql -U $(PGUSER) -d $(PGDB) -enter-pg: - docker exec -it generator-test-container psql -U algorand -d generator_db +QUERY := -c "select count(*) from txn;" +pg-query: + psql $(PGCONN) $(QUERY) -clean-docker: - docker rm -f generator-test-container +pg-down: + docker rm -f $(PGCONT) run-runner: block-generator - ./block-generator runner --conduit-binary ./conduit \ + ./block-generator runner --conduit-binary $(CONDUIT) \ --keep-data-dir \ --test-duration $(DURATION) \ --conduit-log-level trace \ - 
--postgres-connection-string "host=localhost user=algorand password=algorand dbname=generator_db port=15432 sslmode=disable" \
+	$(TEMPLATE) \
+	$(PGRUNNER) \
+	--times $(TIMES) \
 	--scenario $(SCENARIO) \
 	$(RESETDB) \
 	$(VERBOSE) \
 	--report-directory $(REPORTS)
+
+run-file-exporter:
+	$(MAKE) run-runner TEMPLATE="--template file-exporter" TIMES=1 RESETDB= PGRUNNER=
+
+BENCHMARK = "organic.25000"
+benchmark-blocks-export: block-generator
+	$(MAKE) run-file-exporter DURATION=60s SCENARIO=scenarios/benchmarks/$(BENCHMARK).yml REPORTS=$(BENCHMARK)
 
 clean-reports:
 	rm -rf $(REPORTS)
diff --git a/tools/block-generator/README.md b/tools/block-generator/README.md
index a26328ec43..3893c81913 100644
--- a/tools/block-generator/README.md
+++ b/tools/block-generator/README.md
@@ -9,6 +9,7 @@ Several scenarios were designed to mimic different block traffic patterns. Scena
 ### Organic Traffic
 
 Simulate the current mainnet traffic pattern. Approximately:
+
 * 15% payment transactions
 * 10% application transactions
 * 75% asset transactions
@@ -33,7 +34,7 @@ Block generator uses a YAML config file to describe the composition of each rand
 
 The block generator supports **payment**, **asset**, and **application** transactions. The settings are hopefully, more or less, obvious. Distributions are specified as fractions of 1.0, and the sum of all options must add up to ~1.0.
 
-Here is an example which uses all of the current options. Notice that the synthetic blocks are not required to follow algod limits, in this case the block size is specified as 99,999:
+Here is an example which uses all of the current options. Notice that the synthetic blocks are not required to follow algod limits, and that in this case the block size is specified as 99,999:
 
 ```yml
 name: "Mixed (99,999)"
@@ -104,6 +105,7 @@ Flags:
   -c, --config string   Specify the block configuration yaml file.
   -h, --help            help for daemon
   -p, --port uint       Port to start the server at. 
(default 4010) + -v, --verbose If set the daemon will print debugging information from the generator and ledger. ``` ### runner @@ -143,7 +145,7 @@ final_overall_transactions_per_second:8493.40 final_uptime_seconds:3600.06 ``` -Here is the help output for **runner**: +We recommend printing out the help information for the **runner**: ```bash ~$ ./block-generator runner -h @@ -152,36 +154,19 @@ Run an automated test suite using the block-generator daemon and a provided cond Usage: block-generator runner [flags] -Flags: - -i, --conduit-binary string Path to conduit binary. - -l, --conduit-log-level string LogLevel to use when starting Conduit. [panic, fatal, error, warn, info, debug, trace] (default "error") - --cpuprofile string Path where Conduit writes its CPU profile. - -f, --genesis-file string file path to the genesis associated with the db snapshot - -h, --help help for runner - -k, --keep-data-dir If set the validator will not delete the data directory after tests complete. - -p, --metrics-port uint Port to start the metrics server at. (default 9999) - -c, --postgres-connection-string string Postgres connection string. - -r, --report-directory string Location to place test reports. - --reset-db If set database will be deleted before running tests. - --reset-report-dir If set any existing report directory will be deleted before running tests. - -s, --scenario string Directory containing scenarios, or specific scenario file. - -d, --test-duration duration Duration to use for each scenario. (default 5m0s) - --validate If set the validator will run after test-duration has elapsed to verify data is correct. An extra line in each report indicates validator success or failure. - -v, --verbose If set the runner will print debugging information from the generator and ledger. - ``` - -## Example Run using Conduit and Postgres +... etc ... 
+``` + +## Example Runs using Conduit A typical **runner** scenario involves: -* a [scenario configuration](#scenario-configuration) file, e.g. [test_config.yml](./test_config.yml) -* access to a `conduit` binary to query the block generator's mock Algod endpoint and ingest the synthetic blocks +* a [scenario configuration](#scenario-configuration) file, e.g. [config.asset.xfer.yml](./scenarios/config.asset.xfer.yml) or for the example below [test_scenario.yml](./generator/test_scenario.yml) +* access to a `conduit` binary to query the block generator's mock Algod endpoint and ingest the synthetic blocks (below it's assumed to be set in the `CONDUIT_BINARY` environment variable) * a datastore -such as a postgres database- to collect `conduit`'s output * a `conduit` config file to define its import/export behavior -The `block-generator runner` subcommand has a number of options to configure behavion. - -### Sample Run +### Sample Run with Postgres First you'll need to get a `conduit` binary. For example you can follow the [developer portal's instructions](https://developer.algorand.org/docs/get-details/conduit/GettingStarted/#installation) or run `go build .` inside of the directory `cmd/conduit` after downloading the `conduit` repo. @@ -204,5 +189,133 @@ block-generator runner \ ### Scenario Report -If all goes well, the run will generate a directory named reports. +If all goes well, the run will generate a directory named `reports` +in the same directory in which the command was run. In that directory you can see the statistics of the run in the file ending with `.report`. + +The `block-generator runner` subcommand has a number of options to configure behavior. + +## Sample Run with the File Exporter + +It's possible to save the generated blocks to the file system. +This enables running benchmarks and stress tests at a later time and without +needing a live block generator. The setup is very similar to the previous Postgres example. 
The main change compared to the previous example is to _**specify a different conduit configuration**_ template.
+
+The `block-generator runner` command in this case would look like:
+
+```sh
+block-generator runner \
+    --conduit-binary "$CONDUIT_BINARY" \
+    --report-directory reports \
+    --test-duration 30s \
+    --conduit-log-level trace \
+    --template file-exporter \
+    --keep-data-dir \
+    --scenario generator/test_scenario.yml
+```
+
+### Generated Blocks
+
+If all goes well, the run will generate a directory named `reports`
+in the same directory in which the command was run.
+In addition to the statistical report and run logs,
+there will be a directory ending with `_data` — this is conduit's
+data directory (which is saved thanks to the `--keep-data-dir` flag).
+In that directory under `exporter_file_writer/`
+the generated blocks and a genesis file will be saved.
+
+## Scenario Distribution - Configuration vs. Reality
+
+This section follows up on the [Scenario Configuration](#scenario-configuration) section to detail how each kind of transaction is actually chosen.
+Note that—especially for early rounds—there is no guarantee that the
+percentages of transaction types will resemble the configured distribution.
+ +For example consider the [Organic 25,000](scenarios/benchmarks/organic.25000.yml) scenario: + +```yml +name: "Organic (25000)" +genesis_accounts: 10000 +genesis_account_balance: 1000000000000 +tx_per_block: 25000 + +# transaction distribution +tx_pay_fraction: 0.05 +tx_asset_fraction: 0.75 +tx_app_fraction: 0.20 + +# payment config +pay_acct_create_fraction: 0.10 +pay_xfer_fraction: 0.90 + +# asset config +asset_create_fraction: 0.001 +asset_optin_fraction: 0.1 +asset_close_fraction: 0.05 +asset_xfer_fraction: 0.849 +asset_delete_fraction: 0 + +# app kind config +app_boxes_fraction: 1.0 +app_swap_fraction: 0.0 + +# app boxes config +app_boxes_create_fraction: 0.01 +app_boxes_optin_fraction: 0.1 +app_boxes_call_fraction: 0.89 +``` + +We are _actually_ asking the generator for the following distribution: + +* `pay_acct_create_fraction = 0.005 (= 0.05 * 0.10)` +* `pay_xfer_fraction = 0.045 (= 0.05 * 0.90)` +* `asset_create_fraction = 0.00075 (= 0.75 * 0.001)` +* `asset_optin_fraction = 0.075 (= 0.75 * 0.1)` +* `asset_close_fraction = 0.0375 (= 0.75 * 0.05)` +* `asset_xfer_fraction = 0.63675 (= 0.75 * 0.849)` +* `asset_delete_fraction = 0` +* `app_boxes_create_fraction = 0.002 (= 0.20 * 1.0 * 0.01)` +* `app_boxes_optin_fraction = 0.02 (= 0.20 * 1.0 * 0.1)` +* `app_boxes_call_fraction = 0.178 (= 0.20 * 1.0 * 0.89)` + +The block generator randomly chooses + +1. the transaction type (pay, asset, or app) according to the `transaction distribution` +2. based on the type: + + a. for payments and assets, the specific type based on the `payment config` and `asset config` distributions + + b. for apps, the app kind (boxes or swaps) based on the `app kind config` distribution + +3. For _apps only_: the specific app call based on the `app boxes config` (and perhaps in the future `app swap config`) + +As each of the steps above is itself random, we only expect _approximate matching_ to the configured distribution. 
+
+Furthermore, for certain asset and app transactions there may be a substitution that occurs based on the type. In particular:
+
+* for **assets**:
+  * when a requested asset txn is **create**, it is never substituted
+  * when there are no assets, an **asset create** is always substituted
+  * when a requested asset txn is **delete** but the creator doesn't hold all asset funds, an **asset close** is substituted (which itself may be substituted using the **close** rule below)
+  * when a requested asset txn is **opt in** but all accounts are already opted in, an **asset close** is substituted (which itself may be substituted using the **close** rule below)
+  * when a requested asset txn is **transfer** but there is only one account holding it, an **asset opt in** is substituted (which itself may be substituted using the **asset opt in** rule above)
+  * when a requested asset txn is **close** but there is only one account holding it, an **asset opt in** is substituted (which itself may be substituted using the **asset opt in** rule above)
+* for **apps**:
+  * when a requested app txn is **create**, it is never substituted
+  * when a requested app txn is **opt in**:
+    * if the sender is already opted in, an **app call** is substituted
+    * otherwise, if the sender's opt-in is pending for the round, an **app create** is substituted
+  * when a requested app txn is **call** but it's not opted into, an **app opt in** is attempted to be substituted (but this may itself be substituted per the **app opt in** rule above)
+
+Over time, we expect the state of the generator to stabilize so that very few substitutions occur. However, especially for the first few rounds, there may be drastic differences between the config distribution and observed percentages.
+ +In particular: + +* for Round 1, all app transactions are replaced by **app create** +* for Round 2, all **app call** transactions are replaced by **app opt in** + +Therefore, for scenarios involving a variety of app transactions, only for Round 3 and higher do we expect to see distributions comparable to those configured. + +> NOTE: Even in the steady state, we still expect fundamental deviations +> from the configured distributions in the cases of apps. This is because +> an app call may have associated group and inner transactions. For example, +> if an app call requires 1 sibling asset call in its group and has 2 inner payments, this single app call will generate 1 additional asset txn and 2 payment txns. diff --git a/tools/block-generator/generator/daemon.go b/tools/block-generator/generator/daemon.go index fb4f52bab3..0a1371a1bc 100644 --- a/tools/block-generator/generator/daemon.go +++ b/tools/block-generator/generator/daemon.go @@ -48,7 +48,7 @@ func init() { DaemonCmd.Flags().StringVarP(&configFile, "config", "c", "", "Specify the block configuration yaml file.") DaemonCmd.Flags().Uint64VarP(&port, "port", "p", 4010, "Port to start the server at.") - DaemonCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "If set the runner will print debugging information from the generator and ledger.") + DaemonCmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "If set the daemon will print debugging information from the generator and ledger.") DaemonCmd.MarkFlagRequired("config") } diff --git a/tools/block-generator/generator/server.go b/tools/block-generator/generator/server.go index edfe470f3d..5b170c504e 100644 --- a/tools/block-generator/generator/server.go +++ b/tools/block-generator/generator/server.go @@ -75,28 +75,30 @@ func help(w http.ResponseWriter, r *http.Request) { fmt.Fprintf(w, "Use /v2/blocks/:blocknum: to get a block.") } -func maybeWriteError(w http.ResponseWriter, err error) { +func maybeWriteError(handler string, w http.ResponseWriter, err 
error) { if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) + msg := fmt.Sprintf("%s handler: error encountered while writing response for: %v\n", handler, err) + fmt.Println(msg) + http.Error(w, msg, http.StatusInternalServerError) return } } func getReportHandler(gen Generator) func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) { - maybeWriteError(w, gen.WriteReport(w)) + maybeWriteError("report", w, gen.WriteReport(w)) } } func getStatusWaitHandler(gen Generator) func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) { - maybeWriteError(w, gen.WriteStatus(w)) + maybeWriteError("status wait", w, gen.WriteStatus(w)) } } func getGenesisHandler(gen Generator) func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) { - maybeWriteError(w, gen.WriteGenesis(w)) + maybeWriteError("genesis", w, gen.WriteGenesis(w)) } } @@ -113,7 +115,7 @@ func getBlockHandler(gen Generator) func(w http.ResponseWriter, r *http.Request) http.Error(w, err.Error(), http.StatusBadRequest) return } - maybeWriteError(w, gen.WriteBlock(w, round)) + maybeWriteError("block", w, gen.WriteBlock(w, round)) } } @@ -125,7 +127,7 @@ func getAccountHandler(gen Generator) func(w http.ResponseWriter, r *http.Reques http.Error(w, err.Error(), http.StatusBadRequest) return } - maybeWriteError(w, gen.WriteAccount(w, account)) + maybeWriteError("account", w, gen.WriteAccount(w, account)) } } @@ -141,7 +143,7 @@ func getDeltasHandler(gen Generator) func(w http.ResponseWriter, r *http.Request http.Error(w, err.Error(), http.StatusBadRequest) return } - maybeWriteError(w, gen.WriteDeltas(w, round)) + maybeWriteError("deltas", w, gen.WriteDeltas(w, round)) } } diff --git a/tools/block-generator/runner/run.go b/tools/block-generator/runner/run.go index 6cd8d2398a..f2e1c1fc1f 100644 --- a/tools/block-generator/runner/run.go +++ 
b/tools/block-generator/runner/run.go @@ -43,8 +43,11 @@ import ( "github.com/algorand/go-algorand/tools/block-generator/util" ) -//go:embed template/conduit.yml.tmpl -var conduitConfigTmpl string +//go:embed template/conduit_pg_exporter.tmpl +var conduitPostgresConfigTmpl string + +//go:embed template/conduit_file_exporter.tmpl +var conduitFileExporterConfigTmpl string const pad = " " @@ -54,6 +57,7 @@ type Args struct { Path string ConduitBinary string MetricsPort uint64 + Template string PostgresConnectionString string CPUProfilePath string RunDuration time.Duration @@ -153,20 +157,29 @@ func (r *Args) run(reportDirectory string) error { // get next db round var nextRound uint64 var err error - if r.ResetDB { - fmt.Printf("%sPostgreSQL resetting.\n", pad) - if err = util.EmptyDB(r.PostgresConnectionString); err != nil { - return fmt.Errorf("emptyDB err: %w", err) - } - nextRound = 0 - } else { - nextRound, err = util.GetNextRound(r.PostgresConnectionString) - if err != nil && err == util.ErrorNotInitialized { + switch r.Template { + case "file-exporter": + fmt.Printf("%sUsing File Exporter to persist blocks.\n", pad) + case "postgres-exporter": + fmt.Printf("%sUsing PostgreSQL Exporter to persist blocks.\n", pad) + if r.ResetDB { + fmt.Printf("%sPostgreSQL resetting.\n", pad) + if err = util.EmptyDB(r.PostgresConnectionString); err != nil { + return fmt.Errorf("emptyDB err: %w", err) + } nextRound = 0 - } else if err != nil { - return fmt.Errorf("getNextRound err: %w", err) + } else { + nextRound, err = util.GetNextRound(r.PostgresConnectionString) + if err != nil && err == util.ErrorNotInitialized { + nextRound = 0 + } else if err != nil { + return fmt.Errorf("getNextRound err: %w", err) + } + fmt.Printf("%sPostgreSQL next round: %d\n", pad, nextRound) } - fmt.Printf("%sPostgreSQL next round: %d\n", pad, nextRound) + default: + // TODO: the default case should attempt to read the supplied template name as a file under ./template/ + return fmt.Errorf("unknown 
template type: %s", r.Template) } if r.StartDelay > 0 { @@ -188,6 +201,16 @@ func (r *Args) run(reportDirectory string) error { }() // create conduit config from template + var conduitConfigTmpl string + switch r.Template { + case "file-exporter": + conduitConfigTmpl = conduitFileExporterConfigTmpl + case "postgres-exporter": + conduitConfigTmpl = conduitPostgresConfigTmpl + default: + return fmt.Errorf("unknown template type: %s", r.Template) + } + t, err := template.New("conduit").Parse(conduitConfigTmpl) if err != nil { return fmt.Errorf("unable to parse conduit config template: %w", err) @@ -296,14 +319,14 @@ func recordDataToWriter(start time.Time, entry Entry, prefix string, out io.Writ tps := totalTxn / importTimeS key := "overall_transactions_per_second" msg := fmt.Sprintf("%s_%s:%.2f\n", prefix, key, tps) - if _, err := fmt.Fprintf(out, msg); err != nil { + if _, err := fmt.Fprint(out, msg); err != nil { return fmt.Errorf("unable to write metric '%s': %w", key, err) } // Uptime key = "uptime_seconds" msg = fmt.Sprintf("%s_%s:%.2f\n", prefix, key, time.Since(start).Seconds()) - if _, err := fmt.Fprintf(out, msg); err != nil { + if _, err := fmt.Fprint(out, msg); err != nil { return fmt.Errorf("unable to write metric '%s': %w", key, err) } @@ -323,7 +346,7 @@ func recordMetricToWriter(entry Entry, outputKey, metricSuffix string, t metricT msg = fmt.Sprintf("%s:%.2f\n", outputKey, value) } - if _, err := fmt.Fprintf(out, msg); err != nil { + if _, err := fmt.Fprint(out, msg); err != nil { return fmt.Errorf("unable to write metric '%s': %w", outputKey, err) } @@ -383,7 +406,7 @@ func getMetric(entry Entry, suffix string, rateMetric bool) (float64, error) { func writeReport(w io.Writer, scenario string, start time.Time, runDuration time.Duration, generatorReport generator.Report, collector *MetricsCollector) error { write := func(pattern string, parts ...any) error { str := fmt.Sprintf(pattern, parts...) 
- if _, err := fmt.Fprintf(w, str); err != nil { + if _, err := fmt.Fprint(w, str); err != nil { return fmt.Errorf("unable to write '%s': %w", str, err) } return nil @@ -426,12 +449,12 @@ func writeReport(w io.Writer, scenario string, start time.Time, runDuration time txCount := effects[metric] allTxns += txCount str := fmt.Sprintf("transaction_%s_total:%d\n", metric, txCount) - if _, err := fmt.Fprintf(w, str); err != nil { + if _, err := fmt.Fprint(w, str); err != nil { return fmt.Errorf("unable to write '%s' metric: %w", str, err) } } str := fmt.Sprintf("transaction_%s_total:%d\n", "ALL", allTxns) - if _, err := fmt.Fprintf(w, str); err != nil { + if _, err := fmt.Fprint(w, str); err != nil { return fmt.Errorf("unable to write '%s' metric: %w", str, err) } diff --git a/tools/block-generator/runner/runner.go b/tools/block-generator/runner/runner.go index ebb19a738e..4c11fde59c 100644 --- a/tools/block-generator/runner/runner.go +++ b/tools/block-generator/runner/runner.go @@ -35,17 +35,21 @@ func init() { Use: "runner", Short: "Run test suite and collect results.", Long: "Run an automated test suite using the block-generator daemon and a provided conduit binary. 
Results are captured to a specified output directory.",
-		Run: func(cmd *cobra.Command, args []string) {
+		RunE: func(cmd *cobra.Command, args []string) error {
 			fmt.Printf("starting block-generator runner with args: %+v\n", runnerArgs)
-			if err := Run(runnerArgs); err != nil {
-				fmt.Println(err)
+
+			if runnerArgs.Template == "postgres-exporter" && runnerArgs.PostgresConnectionString == "" {
+				return fmt.Errorf("exporting to postgres requires postgres-connection-string to be set")
 			}
+
+			return Run(runnerArgs)
 		},
 	}
 
 	RunnerCmd.Flags().StringVarP(&runnerArgs.Path, "scenario", "s", "", "Directory containing scenarios, or specific scenario file.")
 	RunnerCmd.Flags().StringVarP(&runnerArgs.ConduitBinary, "conduit-binary", "i", "", "Path to conduit binary.")
 	RunnerCmd.Flags().Uint64VarP(&runnerArgs.MetricsPort, "metrics-port", "p", 9999, "Port to start the metrics server at.")
+	RunnerCmd.Flags().StringVarP(&runnerArgs.Template, "template", "", "postgres-exporter", "Specify the conduit template to use. Choices are: file-exporter or postgres-exporter.")
 	RunnerCmd.Flags().StringVarP(&runnerArgs.PostgresConnectionString, "postgres-connection-string", "c", "", "Postgres connection string.")
 	RunnerCmd.Flags().DurationVarP(&runnerArgs.RunDuration, "test-duration", "d", 5*time.Minute, "Duration to use for each scenario.")
 	RunnerCmd.Flags().StringVarP(&runnerArgs.BaseReportDirectory, "report-directory", "r", "", "Location to place test reports. 
If --times is used, this is the prefix for multiple report directories.") @@ -62,6 +66,5 @@ func init() { RunnerCmd.MarkFlagRequired("scenario") RunnerCmd.MarkFlagRequired("conduit-binary") - RunnerCmd.MarkFlagRequired("postgres-connection-string") RunnerCmd.MarkFlagRequired("report-directory") } diff --git a/tools/block-generator/runner/template/conduit_file_exporter.tmpl b/tools/block-generator/runner/template/conduit_file_exporter.tmpl new file mode 100644 index 0000000000..f675022096 --- /dev/null +++ b/tools/block-generator/runner/template/conduit_file_exporter.tmpl @@ -0,0 +1,65 @@ +# Log verbosity: PANIC, FATAL, ERROR, WARN, INFO, DEBUG, TRACE +log-level: {{.LogLevel}} + +# If no log file is provided logs are written to stdout. +log-file: {{.LogFile}} + +# Number of retries to perform after a pipeline plugin error. +retry-count: 120 + +# Time duration to wait between retry attempts. +retry-delay: "1s" + +# Optional filepath to use for pidfile. +#pid-filepath: /path/to/pidfile + +# Whether or not to print the conduit banner on startup. +hide-banner: false + +# When enabled prometheus metrics are available on '/metrics' +metrics: + mode: ON + addr: "{{.MetricsPort}}" + prefix: "conduit" + +# The importer is typically an algod follower node. +importer: + name: algod + config: + # The mode of operation, either "archival" or "follower". + # * archival mode allows you to start processing on any round but does not + # contain the ledger state delta objects required for the postgres writer. + # * follower mode allows you to use a lightweight non-archival node as the + # data source. In addition, it will provide ledger state delta objects to + # the processors and exporter. + mode: "follower" + + # Algod API address. + netaddr: "{{.AlgodNet}}" + + # Algod API token. + token: "" + + +# Zero or more processors may be defined to manipulate what data +# reaches the exporter. +processors: + +# An exporter is defined to do something with the data. 
+exporter: + name: "file_writer" + config: + # BlocksDir is the path to a directory where block data should be stored. + # The directory is created if it doesn't exist. If no directory is provided + # blocks are written to the Conduit data directory. + #block-dir: "/path/to/block/files" + + # FilenamePattern is the format used to write block files. It uses go + # string formatting and should accept one number for the round. + # If the file has a '.gz' extension, blocks will be gzipped. + # Default: "%[1]d_block.msgp.gz" + filename-pattern: "%[1]d_block.msgp.gz" + + # DropCertificate is used to remove the vote certificate from the block data before writing files. + drop-certificate: false + diff --git a/tools/block-generator/runner/template/conduit.yml.tmpl b/tools/block-generator/runner/template/conduit_pg_exporter.tmpl similarity index 100% rename from tools/block-generator/runner/template/conduit.yml.tmpl rename to tools/block-generator/runner/template/conduit_pg_exporter.tmpl diff --git a/tools/block-generator/scripts/run_runner.py b/tools/block-generator/scripts/run_runner.py deleted file mode 100644 index 5f0753930b..0000000000 --- a/tools/block-generator/scripts/run_runner.py +++ /dev/null @@ -1,201 +0,0 @@ -import argparse -import os -from pathlib import Path -import shlex -import subprocess -import sys -import time - - -POSTGRES_CONTAINER = "generator-test-container" -POSTGRES_PORT = 15432 -POSTGRES_DATABASE = "generator_db" - -REPORT_DIRECTORY = "../../tmp/OUTPUT_RUN_RUNNER_TEST" - -CWD = Path.cwd() - -NL = "\n" -BS = "\\" -DBS = BS * 2 -Q = '"' -SQ = ' "' - - -def run_cmd(cmd): - print(f"Running command: {cmd}") - process = subprocess.Popen( - shlex.split(cmd.replace("\\\n", " ")), - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - stdout, stderr = process.communicate() - if (rcode := process.returncode) != 0: - print(f"Error executing command: {cmd}") - print(stderr.decode()) - sys.exit(rcode) - return stdout.decode() - - -def up(args): - 
run_cmd(f"docker rm -f {args.pg_container}") - run_cmd( - f"docker run -d --name {args.pg_container} -e POSTGRES_USER=algorand -e POSTGRES_PASSWORD=algorand -p {args.pg_port}:5432 postgres" - ) - time.sleep(5) - - run_cmd( - f'docker exec -it {args.pg_container} psql -Ualgorand -c "create database {args.pg_database}"' - ) - - -def down(args): - run_cmd(f"docker rm -f {args.pg_container}") - - -def launch_json_args(cmd: str): - def tighten(x): - return x.replace(" \\", "\\") - - def wrap(x): - return tighten(x) if x.startswith('"') else f'"{tighten(x)}"' - - newlines = [] - lines = cmd.splitlines() - for i, line in enumerate(lines): - if i == 0: - continue - if not line.startswith("--"): - aline = wrap(line.replace(" ", "")) - else: - aline = ", ".join(map(wrap, line.split(" ", maxsplit=1))) - - if i < len(lines) - 1: - aline += "," - - newlines.append(aline) - return f"[{(NL.join(newlines)).replace(BS, '')}]" - - -def parse_args(): - parser = argparse.ArgumentParser() - parser.add_argument("--conduit-binary", help="Path to conduit binary") - parser.add_argument( - "--scenario", - default=(default := CWD.parents[1] / "test_scenario.yml"), - help=f"Scenario configuration file ({default=!s})", - ) - parser.add_argument( - "--reset-db", - action="store_true", - default=False, - help="Reset the DB and start at round 0 (default=False)", - ) - parser.add_argument( - "--purge", - action="store_true", - default=False, - help="Shutdown container that has been kept alive (default=False)", - ) - parser.add_argument( - "--keep-alive", - action="store_true", - default=False, - help="Keep postgres container alive at end of run (default=False)", - ) - parser.add_argument( - "--pg-container", - default=(default := POSTGRES_CONTAINER), - help=f"Name of postgres container ({default=})", - ) - parser.add_argument( - "--pg-port", - default=(default := POSTGRES_PORT), - help=f"Postgres port ({default=})", - ) - parser.add_argument( - "--pg-database", - default=(default := 
POSTGRES_DATABASE), - help=f"Postgres database ({default=})", - ) - parser.add_argument( - "--report-directory", - default=(default := REPORT_DIRECTORY), - help=f"Report directory ({default=})", - ) - parser.add_argument( - "--build-generator", - action="store_true", - default=False, - help="Build the generator binary (default=False)", - ) - parser.add_argument( - "--skip-runner", - action="store_true", - default=False, - help="Skip running the generator (default=False)", - ) - parser.add_argument( - "--test-duration", - default=(default := "30s"), - help=f"Test duration ({default=})", - ) - - args = parser.parse_args() - print(args) - return args - - -def main(): - args = parse_args() - - try: - if not args.purge: - print(f"Using scenario file: {args.scenario}") - print(f"!!! rm -rf {args.report_directory} !!!") - run_cmd(f"rm -rf {args.report_directory}") - - if args.build_generator: - print("Building generator.") - os.chdir(CWD) - run_cmd("go build") - os.chdir("..") - else: - print("Skipping generator build.") - - print("Starting postgres container.") - up(args) - - SLNL = "\\\n" - generator_cmd = f"""{CWD}/block-generator \\ -runner \\ ---conduit-binary "{args.conduit_binary}" \\ ---report-directory {args.report_directory} \\ ---test-duration {args.test_duration} \\ ---conduit-log-level trace \\ ---postgres-connection-string "host=localhost user=algorand password=algorand dbname={args.pg_database} port={args.pg_port} sslmode=disable" \\ ---scenario {args.scenario} {DBS + NL + '--reset-db' if args.reset_db else ''}""" - if args.skip_runner: - print("Skipping test runner.") - print(f"Run it yourself:\n{generator_cmd}") - print( - f"""`launch.json` args: -{launch_json_args(generator_cmd)}""" - ) - else: - print("Starting test runner") - run_cmd(generator_cmd) - else: - print("Purging postgres container - NO OTHER ACTION TAKEN") - down(args) - finally: - if not args.keep_alive: - print("Stopping postgres container.") - down(args) - else: - print(f"Keeping postgres 
container alive: {args.pg_container}") - print(f"Also, not removing report directory: {args.report_directory}") - - -if __name__ == "__main__": - main()