
Commit

aggregated block constants
dvush committed Feb 5, 2021
1 parent 6de87fd commit bda1a29
Showing 13 changed files with 123 additions and 71 deletions.
5 changes: 4 additions & 1 deletion core/bin/key_generator/src/recursive_keys.rs
@@ -7,7 +7,10 @@ use zksync_prover_utils::fs_utils::{
};

pub fn make_recursive_verification_keys(config: ChainConfig) {
-    for (proofs, setup_power) in config.circuit.aggregated_proof_sizes_with_setup_pow() {
+    for (proofs, setup_power) in config
+        .circuit
+        .supported_aggregated_proof_sizes_with_setup_pow()
+    {
let path = get_recursive_verification_key_path(proofs);
vlog::info!(
"Generating recursive verification key for {} proofs into: {}",
8 changes: 5 additions & 3 deletions core/bin/key_generator/src/sample_proofs.rs
@@ -54,7 +54,7 @@ pub fn make_sample_proofs(config: ChainConfig) -> anyhow::Result<()> {

let max_aggregated_size = *config
.circuit
-        .aggregated_proof_sizes
+        .supported_aggregated_proof_sizes
.iter()
.max()
.ok_or_else(|| anyhow::anyhow!("Aggregated proof sizes should not be empty"))?;
@@ -63,7 +63,7 @@ pub fn make_sample_proofs(config: ChainConfig) -> anyhow::Result<()> {
let aggregated_proof = {
let min_aggregated_size = *config
.circuit
-        .aggregated_proof_sizes
+        .supported_aggregated_proof_sizes
.iter()
.min()
.ok_or_else(|| anyhow::anyhow!("Aggregated proof sizes should not be empty"))?;
@@ -79,7 +79,9 @@ pub fn make_sample_proofs(config: ChainConfig) -> anyhow::Result<()> {
gen_aggregate_proof(
vks,
proof_data,
-            &config.circuit.aggregated_proof_sizes_with_setup_pow(),
+            &config
+                .circuit
+                .supported_aggregated_proof_sizes_with_setup_pow(),
false,
)?
};
4 changes: 2 additions & 2 deletions core/bin/key_generator/src/verifier_contract_generator/mod.rs
@@ -40,12 +40,12 @@ pub(crate) fn create_verifier_contract(config: ChainConfig) {
let chunks = to_json(config.circuit.supported_block_chunks_sizes);
template_params.insert("chunks".to_string(), chunks);

-    let sizes = to_json(config.circuit.aggregated_proof_sizes.clone());
+    let sizes = to_json(config.circuit.supported_aggregated_proof_sizes.clone());
template_params.insert("sizes".to_string(), sizes);

let templates_for_key_getters = config
.circuit
-        .aggregated_proof_sizes
+        .supported_aggregated_proof_sizes
.into_iter()
.map(|blocks| {
let key_getter_name = format!("getVkAggregated{}", blocks);
5 changes: 3 additions & 2 deletions core/bin/prover/src/plonk_step_by_step_prover.rs
@@ -36,8 +36,9 @@ impl ProverConfig for PlonkStepByStepProverConfig {
fn from_env() -> Self {
let env_config = ChainConfig::from_env();

-        let aggregated_proof_sizes_with_setup_pow =
-            env_config.circuit.aggregated_proof_sizes_with_setup_pow();
+        let aggregated_proof_sizes_with_setup_pow = env_config
+            .circuit
+            .supported_aggregated_proof_sizes_with_setup_pow();

Self {
download_setup_from_network: parse_env("MISC_PROVER_DOWNLOAD_SETUP"),
26 changes: 12 additions & 14 deletions core/bin/zksync_api/src/fee_ticker/constants.rs
@@ -1,30 +1,28 @@
use zksync_types::{
    config::MAX_WITHDRAWALS_TO_COMPLETE_IN_A_CALL,
-    gas_counter::{CommitCost, GasCounter, VerifyCost},
+    gas_counter::{CommitCost, VerifyCost},
    ChangePubKeyOp, TransferOp, TransferToNewOp, WithdrawOp,
};

+/// Gas cost per chunk to cover constant cost of commit, execute and prove transactions
+pub(crate) const AMORTIZED_COST_PER_CHUNK: u64 = 200;
// Base operation costs estimated via `gas_price` test.
//
-// Factor of 1000 * CHUNKS accounts for constant overhead of the commit and verify for block of 680 chunks
-// (140k + 530k) / 680. Should be removed after recursion is introduced to mainnet.
+// Factor of AMORTIZED_COST_PER_CHUNK * CHUNKS accounts for constant overhead of the commit, execute, prove for blocks of 680 chunks
+// where we assume that we commit 5 blocks at once, prove 10 and execute 5
pub(crate) const BASE_TRANSFER_COST: u64 = VerifyCost::TRANSFER_COST
-    + CommitCost::TRANSFER_TO_NEW_COST
-    + 1000 * (TransferOp::CHUNKS as u64);
+    + CommitCost::TRANSFER_COST
+    + AMORTIZED_COST_PER_CHUNK * (TransferOp::CHUNKS as u64);
pub(crate) const BASE_TRANSFER_TO_NEW_COST: u64 = VerifyCost::TRANSFER_TO_NEW_COST
    + CommitCost::TRANSFER_TO_NEW_COST
-    + 1000 * (TransferToNewOp::CHUNKS as u64);
+    + AMORTIZED_COST_PER_CHUNK * (TransferToNewOp::CHUNKS as u64);
pub(crate) const BASE_WITHDRAW_COST: u64 = VerifyCost::WITHDRAW_COST
    + CommitCost::WITHDRAW_COST
-    + GasCounter::COMPLETE_WITHDRAWALS_COST
-    + 1000 * (WithdrawOp::CHUNKS as u64)
-    + (GasCounter::COMPLETE_WITHDRAWALS_BASE_COST / MAX_WITHDRAWALS_TO_COMPLETE_IN_A_CALL);
+    + AMORTIZED_COST_PER_CHUNK * (WithdrawOp::CHUNKS as u64);
pub(crate) const BASE_CHANGE_PUBKEY_OFFCHAIN_COST: u64 = CommitCost::CHANGE_PUBKEY_COST_OFFCHAIN
    + VerifyCost::CHANGE_PUBKEY_COST
-    + 1000 * (ChangePubKeyOp::CHUNKS as u64);
+    + AMORTIZED_COST_PER_CHUNK * (ChangePubKeyOp::CHUNKS as u64);
pub(crate) const BASE_CHANGE_PUBKEY_ONCHAIN_COST: u64 = CommitCost::CHANGE_PUBKEY_COST_ONCHAIN
-    + zksync_types::gas_counter::VerifyCost::CHANGE_PUBKEY_COST
-    + 1000 * (ChangePubKeyOp::CHUNKS as u64);
+    + VerifyCost::CHANGE_PUBKEY_COST
+    + AMORTIZED_COST_PER_CHUNK * (ChangePubKeyOp::CHUNKS as u64);

// The Subsidized cost of operations.
// Represent the cost of performing operations after recursion is introduced to mainnet.
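A quick sanity check on the new AMORTIZED_COST_PER_CHUNK constant above, using the 140k commit and 530k verify overheads quoted in the removed comment. The execute overhead and the rounding margin below are assumptions, so treat this as an illustration rather than the derivation behind the commit.

// Back-of-the-envelope check of AMORTIZED_COST_PER_CHUNK, illustrative only.
// The 140k/530k figures come from the old comment; the execute overhead and
// the safety margin are assumptions, not values taken from this commit.
const CHUNKS_PER_BLOCK: u64 = 680;
const COMMIT_OVERHEAD: u64 = 140_000; // shared by ~5 blocks per commit tx
const PROVE_OVERHEAD: u64 = 530_000; // shared by ~10 blocks per proof tx
const EXECUTE_OVERHEAD: u64 = 140_000; // assumed, shared by ~5 blocks per execute tx

fn main() {
    let per_block = COMMIT_OVERHEAD / 5 + PROVE_OVERHEAD / 10 + EXECUTE_OVERHEAD / 5;
    let per_chunk = per_block / CHUNKS_PER_BLOCK;
    // Prints roughly 160; AMORTIZED_COST_PER_CHUNK = 200 leaves some margin on top.
    println!("amortized per-chunk overhead ~ {} gas", per_chunk);
}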
34 changes: 17 additions & 17 deletions core/bin/zksync_core/src/committer/aggregated_committer.rs
@@ -1,6 +1,7 @@
use chrono::{DateTime, Utc};
use std::cmp::max;
use std::time::Duration;
+use zksync_config::ZkSyncConfig;
use zksync_crypto::proof::AggregatedProof;
use zksync_storage::chain::block::BlockSchema;
use zksync_storage::chain::operations::OperationsSchema;
@@ -188,13 +189,9 @@ fn create_execute_blocks_operation(
})
}

-const MAX_BLOCK_TO_COMMIT: usize = 5;
-const BLOCK_COMMIT_DEADLINE: Duration = Duration::from_secs(10);
-const MAX_GAS_TX: u64 = 2_000_000;
-const AVAILABLE_AGGREGATE_PROOFS: &[usize] = &[1, 5];

async fn create_aggregated_commits_storage(
storage: &mut StorageProcessor<'_>,
+    config: &ZkSyncConfig,
) -> anyhow::Result<bool> {
let last_aggregate_committed_block = OperationsSchema(storage)
.get_last_affected_block_by_aggregated_action(AggregatedActionType::CommitBlocks)
@@ -216,9 +213,9 @@ async fn create_aggregated_commits_storage(
&old_committed_block,
&new_blocks,
Utc::now(),
-        MAX_BLOCK_TO_COMMIT,
-        BLOCK_COMMIT_DEADLINE,
-        MAX_GAS_TX.into(),
+        config.chain.state_keeper.max_aggregated_blocks_to_commit,
+        config.chain.state_keeper.block_commit_deadline(),
+        config.chain.state_keeper.max_aggregated_tx_gas.into(),
);

if let Some(commit_operation) = commit_operation {
@@ -235,6 +232,7 @@

async fn create_aggregated_prover_task_storage(
storage: &mut StorageProcessor<'_>,
+    config: &ZkSyncConfig,
) -> anyhow::Result<bool> {
let last_aggregate_committed_block = OperationsSchema(storage)
.get_last_affected_block_by_aggregated_action(AggregatedActionType::CommitBlocks)
@@ -266,10 +264,10 @@ async fn create_aggregated_prover_task_storage(

let create_proof_operation = create_new_create_proof_operation(
&blocks_with_proofs,
-        AVAILABLE_AGGREGATE_PROOFS,
+        &config.chain.state_keeper.aggregated_proof_sizes,
        Utc::now(),
-        BLOCK_COMMIT_DEADLINE,
-        MAX_GAS_TX.into(),
+        config.chain.state_keeper.block_prove_deadline(),
+        config.chain.state_keeper.max_aggregated_tx_gas.into(),
);
if let Some(operation) = create_proof_operation {
let aggregated_op = operation.into();
@@ -350,6 +348,7 @@ async fn create_aggregated_publish_proof_operation_storage(

async fn create_aggregated_execute_operation_storage(
storage: &mut StorageProcessor<'_>,
+    config: &ZkSyncConfig,
) -> anyhow::Result<bool> {
let last_aggregate_executed_block = OperationsSchema(storage)
.get_last_affected_block_by_aggregated_action(AggregatedActionType::ExecuteBlocks)
@@ -376,9 +375,9 @@ async fn create_aggregated_execute_operation_storage(
let execute_operation = create_execute_blocks_operation(
&blocks,
Utc::now(),
-        MAX_BLOCK_TO_COMMIT,
-        BLOCK_COMMIT_DEADLINE,
-        MAX_GAS_TX.into(),
+        config.chain.state_keeper.max_aggregated_blocks_to_execute,
+        config.chain.state_keeper.block_execute_deadline(),
+        config.chain.state_keeper.max_aggregated_tx_gas.into(),
);

if let Some(operation) = execute_operation {
@@ -395,11 +394,12 @@ async fn create_aggregated_execute_operation_storage(

pub async fn create_aggregated_operations_storage(
storage: &mut StorageProcessor<'_>,
+    config: &ZkSyncConfig,
) -> anyhow::Result<()> {
-    while create_aggregated_commits_storage(storage).await? {}
-    while create_aggregated_prover_task_storage(storage).await? {}
+    while create_aggregated_commits_storage(storage, config).await? {}
+    while create_aggregated_prover_task_storage(storage, config).await? {}
    while create_aggregated_publish_proof_operation_storage(storage).await? {}
-    while create_aggregated_execute_operation_storage(storage).await? {}
+    while create_aggregated_execute_operation_storage(storage, config).await? {}

Ok(())
}
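As a side note, the constants deleted above map onto the new chain.state_keeper config section roughly as follows; the helper function here is hypothetical and only spells out which field replaces which constant.

use zksync_config::ZkSyncConfig;

// Hypothetical helper: old hard-coded constant -> new config field.
fn aggregation_limits(config: &ZkSyncConfig) {
    let sk = &config.chain.state_keeper;
    // MAX_BLOCK_TO_COMMIT (5) is split into separate commit/execute limits:
    let _max_commit = sk.max_aggregated_blocks_to_commit;
    let _max_execute = sk.max_aggregated_blocks_to_execute;
    // BLOCK_COMMIT_DEADLINE (10s for every stage) becomes one deadline per stage:
    let _commit_deadline = sk.block_commit_deadline();
    let _prove_deadline = sk.block_prove_deadline();
    let _execute_deadline = sk.block_execute_deadline();
    // MAX_GAS_TX (2_000_000) and AVAILABLE_AGGREGATE_PROOFS (&[1, 5]):
    let _max_gas = sk.max_aggregated_tx_gas;
    let _proof_sizes = &sk.aggregated_proof_sizes;
}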
8 changes: 5 additions & 3 deletions core/bin/zksync_core/src/committer/mod.rs
@@ -7,6 +7,7 @@ use serde::{Deserialize, Serialize};
use tokio::{task::JoinHandle, time};
// Workspace uses
use crate::mempool::MempoolBlocksRequest;
+use zksync_config::ZkSyncConfig;
use zksync_storage::ConnectionPool;
use zksync_types::{
block::{Block, ExecutedOperations, PendingBlock},
@@ -185,7 +186,7 @@ async fn commit_block(
metrics::histogram!("committer.commit_block", start.elapsed());
}

-async fn poll_for_new_proofs_task(pool: ConnectionPool) {
+async fn poll_for_new_proofs_task(pool: ConnectionPool, config: ZkSyncConfig) {
let mut timer = time::interval(PROOF_POLL_INTERVAL);
loop {
timer.tick().await;
@@ -195,7 +196,7 @@ async fn poll_for_new_proofs_task(pool: ConnectionPool) {
.await
.expect("db connection failed for committer");

-        aggregated_committer::create_aggregated_operations_storage(&mut storage)
+        aggregated_committer::create_aggregated_operations_storage(&mut storage, &config)
.await
.map_err(|e| vlog::error!("Failed to create aggregated operation: {}", e))
.unwrap_or_default();
@@ -207,11 +208,12 @@ pub fn run_committer(
rx_for_ops: Receiver<CommitRequest>,
mempool_req_sender: Sender<MempoolBlocksRequest>,
pool: ConnectionPool,
+    config: &ZkSyncConfig,
) -> JoinHandle<()> {
tokio::spawn(handle_new_commit_task(
rx_for_ops,
mempool_req_sender,
pool.clone(),
));
-    tokio::spawn(poll_for_new_proofs_task(pool))
+    tokio::spawn(poll_for_new_proofs_task(pool, config.clone()))
}
1 change: 1 addition & 0 deletions core/bin/zksync_core/src/lib.rs
@@ -136,6 +136,7 @@ pub async fn run_core(
proposed_blocks_receiver,
mempool_block_request_sender.clone(),
connection_pool.clone(),
+        &config,
);

// Start mempool.
55 changes: 46 additions & 9 deletions core/lib/config/src/configs/chain.rs
@@ -38,22 +38,26 @@ pub struct Circuit {
/// so both arrays can be `zip`ped together).
pub supported_block_chunks_sizes_setup_powers: Vec<usize>,
/// Sizes of blocks for aggregated proofs.
-    pub aggregated_proof_sizes: Vec<usize>,
+    pub supported_aggregated_proof_sizes: Vec<usize>,
/// Setup power needed to create an aggregated proof for blocks of certain size (goes in the same order as the
/// previous field, so both arrays can be `zip`ped together).
-    pub aggregated_proof_sizes_setup_power2: Vec<u32>,
+    pub supported_aggregated_proof_sizes_setup_power2: Vec<u32>,
/// Depth of the Account Merkle tree.
pub account_tree_depth: usize,
/// Depth of the Balance Merkle tree.
pub balance_tree_depth: usize,
}

impl Circuit {
-    pub fn aggregated_proof_sizes_with_setup_pow(&self) -> Vec<(usize, u32)> {
-        self.aggregated_proof_sizes
+    pub fn supported_aggregated_proof_sizes_with_setup_pow(&self) -> Vec<(usize, u32)> {
+        self.supported_aggregated_proof_sizes
            .iter()
            .cloned()
-            .zip(self.aggregated_proof_sizes_setup_power2.iter().cloned())
+            .zip(
+                self.supported_aggregated_proof_sizes_setup_power2
+                    .iter()
+                    .cloned(),
+            )
.collect()
}
}
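For reference, a standalone sketch (not part of the diff) of what the renamed helper computes, using the sample sizes and setup powers from the config tests further down: each supported aggregated proof size is paired with its setup power of two.

fn main() {
    // Same zip as supported_aggregated_proof_sizes_with_setup_pow(), using the
    // test values below (sizes 1, 5, 10, 20 and setup powers 22, 24, 25, 26).
    let sizes: Vec<usize> = vec![1, 5, 10, 20];
    let powers: Vec<u32> = vec![22, 24, 25, 26];
    let pairs: Vec<(usize, u32)> = sizes.iter().cloned().zip(powers.iter().cloned()).collect();
    assert_eq!(pairs, vec![(1, 22), (5, 24), (10, 25), (20, 26)]);
}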
@@ -77,13 +81,32 @@ pub struct StateKeeper {
/// Maximum amount of miniblock iterations in case of block containing a fast withdrawal request.
pub fast_block_miniblock_iterations: u64,
pub fee_account_addr: Address,
+    pub aggregated_proof_sizes: Vec<usize>,
+    pub max_aggregated_blocks_to_commit: usize,
+    pub max_aggregated_blocks_to_execute: usize,
+    pub block_commit_deadline: u64,
+    pub block_prove_deadline: u64,
+    pub block_execute_deadline: u64,
+    pub max_aggregated_tx_gas: usize,
}

impl StateKeeper {
/// Converts `self.miniblock_iteration_interval` into `Duration`.
pub fn miniblock_iteration_interval(&self) -> Duration {
Duration::from_millis(self.miniblock_iteration_interval)
}

+    pub fn block_commit_deadline(&self) -> Duration {
+        Duration::from_secs(self.block_commit_deadline)
+    }
+
+    pub fn block_prove_deadline(&self) -> Duration {
+        Duration::from_secs(self.block_prove_deadline)
+    }
+
+    pub fn block_execute_deadline(&self) -> Duration {
+        Duration::from_secs(self.block_execute_deadline)
+    }
}

#[cfg(test)]
@@ -97,8 +120,8 @@ mod tests {
key_dir: "keys/plonk-975ae851".into(),
supported_block_chunks_sizes: vec![6, 30, 74, 150, 320, 630],
supported_block_chunks_sizes_setup_powers: vec![21, 22, 23, 24, 25, 26],
-            aggregated_proof_sizes: vec![1, 5, 10, 20],
-            aggregated_proof_sizes_setup_power2: vec![22, 24, 25, 26],
+            supported_aggregated_proof_sizes: vec![1, 5, 10, 20],
+            supported_aggregated_proof_sizes_setup_power2: vec![22, 24, 25, 26],
account_tree_depth: 32,
balance_tree_depth: 11,
},
@@ -111,6 +134,13 @@ mod tests {
miniblock_iterations: 10,
fast_block_miniblock_iterations: 5,
fee_account_addr: addr("de03a0B5963f75f1C8485B355fF6D30f3093BDE7"),
+            aggregated_proof_sizes: vec![1, 5],
+            max_aggregated_blocks_to_commit: 3,
+            max_aggregated_blocks_to_execute: 4,
+            block_commit_deadline: 300,
+            block_prove_deadline: 3_000,
+            block_execute_deadline: 4_000,
+            max_aggregated_tx_gas: 4_000_000,
},
}
}
@@ -121,8 +151,8 @@
CHAIN_CIRCUIT_KEY_DIR="keys/plonk-975ae851"
CHAIN_CIRCUIT_SUPPORTED_BLOCK_CHUNKS_SIZES="6,30,74,150,320,630"
CHAIN_CIRCUIT_SUPPORTED_BLOCK_CHUNKS_SIZES_SETUP_POWERS="21,22,23,24,25,26"
-CHAIN_CIRCUIT_AGGREGATED_PROOF_SIZES="1,5,10,20"
-CHAIN_CIRCUIT_AGGREGATED_PROOF_SIZES_SETUP_POWER2="22,24,25,26"
+CHAIN_CIRCUIT_SUPPORTED_AGGREGATED_PROOF_SIZES="1,5,10,20"
+CHAIN_CIRCUIT_SUPPORTED_AGGREGATED_PROOF_SIZES_SETUP_POWER2="22,24,25,26"
CHAIN_CIRCUIT_ACCOUNT_TREE_DEPTH="32"
CHAIN_CIRCUIT_BALANCE_TREE_DEPTH="11"
CHAIN_ETH_MAX_NUMBER_OF_WITHDRAWALS_PER_BLOCK="10"
@@ -132,6 +162,13 @@ CHAIN_STATE_KEEPER_MINIBLOCK_ITERATION_INTERVAL="200"
CHAIN_STATE_KEEPER_MINIBLOCK_ITERATIONS="10"
CHAIN_STATE_KEEPER_FAST_BLOCK_MINIBLOCK_ITERATIONS="5"
CHAIN_STATE_KEEPER_FEE_ACCOUNT_ADDR="0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7"
+CHAIN_STATE_KEEPER_AGGREGATED_PROOF_SIZES="1,5"
+CHAIN_STATE_KEEPER_MAX_AGGREGATED_BLOCKS_TO_COMMIT="3"
+CHAIN_STATE_KEEPER_MAX_AGGREGATED_BLOCKS_TO_EXECUTE="4"
+CHAIN_STATE_KEEPER_BLOCK_COMMIT_DEADLINE="300"
+CHAIN_STATE_KEEPER_BLOCK_PROVE_DEADLINE="3000"
+CHAIN_STATE_KEEPER_BLOCK_EXECUTE_DEADLINE="4000"
+CHAIN_STATE_KEEPER_MAX_AGGREGATED_TX_GAS="4000000"
"#;
set_env(config);
