Commit e4d5569
seems to work
dvush committed Dec 3, 2020
1 parent a459c3e commit e4d5569
Showing 16 changed files with 191 additions and 142 deletions.
3 changes: 2 additions & 1 deletion Cargo.lock

Generated file; diff not rendered.

14 changes: 3 additions & 11 deletions contracts/contracts/ZkSync.sol
@@ -429,12 +429,7 @@ contract ZkSync is UpgradeableMaster, Storage, Config, Events, ReentrancyGuard {

     /// @notice Blocks commitment verification.
     /// @notice Only verifies block commitments without any other processing
-    function proofBlocks(
-        StoredBlockInfo[] memory _committedBlocks,
-        uint256[] memory _commitmentIdxs,
-        ProofInput memory _proof
-    ) external nonReentrant {
-        require(_committedBlocks.length == _commitmentIdxs.length, "pbl1");
+    function proofBlocks(StoredBlockInfo[] memory _committedBlocks, ProofInput memory _proof) external nonReentrant {
         uint32 currentTotalBlocksProofed = totalBlocksProofed;
         for (uint256 i = 0; i < _committedBlocks.length; ++i) {
             require(
@@ -444,10 +439,7 @@ contract ZkSync is UpgradeableMaster, Storage, Config, Events, ReentrancyGuard {
             ++currentTotalBlocksProofed;

             uint256 mask = (~uint256(0)) >> 3;
-            require(
-                _proof.commitments[_commitmentIdxs[i]] & mask == uint256(_committedBlocks[i].commitment) & mask,
-                "pbl3"
-            ); // incorrect block commitment in proof
+            require(_proof.commitments[i] & mask == uint256(_committedBlocks[i].commitment) & mask, "pbl3"); // incorrect block commitment in proof
         }

         bool success =
@@ -485,7 +477,7 @@ contract ZkSync is UpgradeableMaster, Storage, Config, Events, ReentrancyGuard {

         totalBlocksCommitted = blocksCommitted;
         totalCommittedPriorityRequests -= revertedPriorityRequests;
-        if (totalBlocksCommitted > totalBlocksProofed) {
+        if (totalBlocksCommitted < totalBlocksProofed) {
            totalBlocksProofed = totalBlocksCommitted;
         }

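Note on this change: with _commitmentIdxs removed, proofBlocks assumes _proof.commitments[i] corresponds positionally to _committedBlocks[i], so the committer must submit blocks in exactly the order they appear in the aggregated proof. The mask drops the top three bits of each 256-bit commitment, presumably so the comparison fits the proof system's ~254-bit field element. The last hunk fixes an inverted comparison: after blocks are reverted, totalBlocksProofed must be clamped down to totalBlocksCommitted, which only the new < check achieves. A minimal Rust sketch of that clamp (a hypothetical free function, not contract code):

    // After a revert, the proofed counter must never exceed the committed
    // counter; clamp it down if it does (mirrors the corrected `<` branch).
    fn clamp_proofed(total_blocks_committed: u32, total_blocks_proofed: &mut u32) {
        if total_blocks_committed < *total_blocks_proofed {
            *total_blocks_proofed = total_blocks_committed;
        }
    }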
22 changes: 19 additions & 3 deletions core/bin/prover/src/bin/dummy_prover.rs
@@ -4,6 +4,7 @@ use std::time::Duration;
 use zksync_prover::cli_utils::main_for_prover_impl;
 use zksync_prover::{ApiClient, ProverConfig, ProverImpl};
 use zksync_prover_utils::api::{JobRequestData, JobResultData};
+use zksync_prover_utils::fs_utils::{load_correct_aggregated_proof, load_correct_single_proof};
 use zksync_utils::get_env;

 #[derive(Debug)]
@@ -36,10 +37,25 @@ impl ProverImpl for DummyProver {

     fn create_proof(&self, data: JobRequestData) -> Result<JobResultData, Error> {
         let empty_proof = match data {
-            JobRequestData::AggregatedBlockProof(_) => {
-                JobResultData::AggregatedBlockProof(Default::default())
+            JobRequestData::AggregatedBlockProof(single_proofs) => {
+                let mut aggregated_proof = load_correct_aggregated_proof()
+                    .expect("Failed to load correct aggregated proof");
+                aggregated_proof.individual_vk_inputs = Vec::new();
+                for (single_proof, _) in single_proofs {
+                    aggregated_proof
+                        .individual_vk_inputs
+                        .push(single_proof.0.input_values[0]);
+                    aggregated_proof.individual_vk_idxs.push(0);
+                }
+
+                JobResultData::AggregatedBlockProof(aggregated_proof)
             }
+            JobRequestData::BlockProof(prover_data, _) => {
+                let mut single_proof =
+                    load_correct_single_proof().expect("Failed to load correct single proof");
+                single_proof.0.input_values[0] = prover_data.public_data_commitment;
+                JobResultData::BlockProof(single_proof)
+            }
-            JobRequestData::BlockProof(..) => JobResultData::BlockProof(Default::default()),
         };
         Ok(empty_proof)
     }
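Note on this change: instead of returning Default::default() placeholders, the dummy prover now starts from a known-valid proof loaded from disk (see the new fs_utils helpers below) and rebinds its public inputs per job, so downstream commitment checks still pass. A minimal sketch of the patch-a-canned-proof pattern, using hypothetical stand-in types (the real SingleProof lives in zksync_crypto):

    // Stand-in for a canned proof whose first public input is the slot the
    // verifier compares against the block's public-data commitment.
    #[derive(Clone)]
    struct CannedProof {
        input_values: Vec<u64>,
    }

    // Keep the valid proof data but rebind the commitment slot so the
    // verifier's check matches this particular block.
    fn patch(mut canned: CannedProof, commitment: u64) -> CannedProof {
        canned.input_values[0] = commitment;
        canned
    }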
4 changes: 2 additions & 2 deletions core/bin/prover/src/lib.rs
@@ -106,9 +106,9 @@ async fn compute_proof_no_blocking<PROVER>(
 where
     PROVER: ProverImpl + Send + Sync + 'static,
 {
-    let (result_sender, result_receiver) = oneshot::channel();
-    let (panic_sender, panic_receiver) = oneshot::channel();
+    let (mut result_receiver, mut panic_receiver) = {
+        let (result_sender, result_receiver) = oneshot::channel();
+        let (panic_sender, panic_receiver) = oneshot::channel();
         std::thread::spawn(move || {
             // TODO: panic sender should work
             // std::panic::set_hook(Box::new(|panic_info| {
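Note on this change: the oneshot channels now live inside the block that spawns the worker thread, so only the two receivers (made mut so they can be polled) escape into the surrounding async code. A sketch of the underlying pattern, assuming tokio's oneshot channel (the real code may use a different oneshot implementation):

    use tokio::sync::oneshot;

    // Run a heavy job on a dedicated OS thread and await its result without
    // blocking the async executor.
    async fn compute_no_blocking<T: Send + 'static>(
        job: impl FnOnce() -> T + Send + 'static,
    ) -> T {
        let (tx, rx) = oneshot::channel();
        std::thread::spawn(move || {
            let _ = tx.send(job()); // receiver may be gone; ignore the error
        });
        rx.await.expect("worker thread dropped the sender")
    }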
62 changes: 44 additions & 18 deletions core/bin/zksync_core/src/committer.rs
@@ -8,10 +8,11 @@ use serde::{Deserialize, Serialize};
 use tokio::{task::JoinHandle, time};
 // Workspace uses
 use crate::mempool::MempoolRequest;
+use zksync_crypto::params::RECURSIVE_CIRCUIT_SIZES;
 use zksync_storage::{ConnectionPool, StorageProcessor};
 use zksync_types::aggregated_operations::{
     AggregatedActionType, AggregatedOperation, BlockExecuteOperationArg, BlocksCommitOperation,
-    BlocksExecuteOperation, BlocksProofOperation,
+    BlocksCreateProofOperation, BlocksExecuteOperation, BlocksProofOperation,
 };
 use zksync_types::{
     block::{Block, ExecutedOperations, PendingBlock},
@@ -286,25 +287,46 @@ async fn create_aggregated_operations(storage: &mut StorageProcessor<'_>) -> any
     }

     if last_committed_block > last_aggregate_create_proof_block {
-        let mut proofs_exits = true;
+        let mut consecutive_proofs = Vec::new();
         for block_number in last_aggregate_create_proof_block + 1..=last_committed_block {
-            proofs_exits = proofs_exits
-                && storage
-                    .prover_schema()
-                    .load_proof(block_number)
-                    .await?
-                    .is_some();
-            if !proofs_exits {
+            let proof_exists = storage
+                .prover_schema()
+                .load_proof(block_number)
+                .await?
+                .is_some();
+            if proof_exists {
+                consecutive_proofs.push(block_number);
+            } else {
                 break;
             }
         }
-        if proofs_exits {
+        if consecutive_proofs.len() > 0 {
+            let aggregate_sizes = RECURSIVE_CIRCUIT_SIZES
+                .iter()
+                .map(|(proofs, _)| *proofs)
+                .collect::<Vec<_>>();
+            let max_agg_size = *aggregate_sizes
+                .iter()
+                .max()
+                .expect("should be at least one recursive size");
+            let agg_size = aggregate_sizes
+                .into_iter()
+                .find(|agg_size| *agg_size >= consecutive_proofs.len())
+                .unwrap_or(max_agg_size);

             let mut block_numbers = Vec::new();
             let mut blocks = Vec::new();
             let mut block_idxs_in_proof = Vec::new();

+            let proofs_to_pad = if agg_size > consecutive_proofs.len() {
+                agg_size - consecutive_proofs.len()
+            } else {
+                0
+            };
             let mut idx = 0;
-            for block_number in last_aggregate_create_proof_block + 1..=last_committed_block {
+            for block_number in last_aggregate_create_proof_block + 1
+                ..=last_aggregate_create_proof_block + consecutive_proofs.len() as u32
+            {
                 let block = storage
                     .chain()
                     .block_schema()
@@ -317,7 +339,11 @@ async fn create_aggregated_operations(storage: &mut StorageProcessor<'_>) -> any
                 idx += 1;
             }

-            let aggregated_op_create = AggregatedOperation::CreateProofBlocks(block_numbers);
+            let aggregated_op_create =
+                AggregatedOperation::CreateProofBlocks(BlocksCreateProofOperation {
+                    blocks: block_numbers,
+                    proofs_to_pad,
+                });

             storage
                 .chain()
@@ -335,12 +361,15 @@ async fn create_aggregated_operations(storage: &mut StorageProcessor<'_>) -> any

     if last_aggregate_create_proof_block > last_aggregate_publish_proof_block {
         let create_proof_blocks =
-            if let Some(AggregatedOperation::CreateProofBlocks(create_proof_blocks)) = storage
+            if let Some(AggregatedOperation::CreateProofBlocks(BlocksCreateProofOperation {
+                blocks: create_proof_blocks,
+                ..
+            })) = storage
                 .chain()
                 .operations_schema()
                 .get_aggregated_op_that_affects_block(
                     AggregatedActionType::CreateProofBlocks,
-                    last_aggregate_create_proof_block + 1,
+                    last_aggregate_publish_proof_block + 1,
                 )
                 .await?
             {
@@ -359,23 +388,20 @@ async fn create_aggregated_operations(storage: &mut StorageProcessor<'_>) -> any
         if let Some(proof) = proof {
             let proof = proof.serialize_aggregated_proof();
             let mut blocks = Vec::new();
-            let mut block_idxs_in_proof = Vec::new();
-            for (idx, block_number) in create_proof_blocks.into_iter().enumerate() {
+            for block_number in create_proof_blocks {
                 let block = storage
                     .chain()
                     .block_schema()
                     .get_block(block_number)
                     .await?
                     .expect("Failed to get last committed block from db");
                 blocks.push(block);
-                block_idxs_in_proof.push(idx);
             }

             let aggregated_op_publish =
                 AggregatedOperation::PublishProofBlocksOnchain(BlocksProofOperation {
                     blocks,
                     proof,
-                    block_idxs_in_proof,
                 });
             storage
                 .chain()
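Note on this change: create_aggregated_operations now collects the longest run of consecutively proven blocks, picks the smallest supported recursive circuit that fits the run (falling back to the largest), and records the shortfall as proofs_to_pad so the remaining slots can be filled with padding proofs. A runnable sketch of that selection rule, assuming the size table is sorted ascending; the sizes below are illustrative, not the real RECURSIVE_CIRCUIT_SIZES values:

    // Returns (proofs_to_aggregate, padding) for n consecutive proofs, given
    // the supported aggregate sizes in ascending order.
    fn pick_agg_size(sizes: &[usize], n_proofs: usize) -> (usize, usize) {
        let max = *sizes.iter().max().expect("at least one recursive size");
        let agg = sizes.iter().copied().find(|s| *s >= n_proofs).unwrap_or(max);
        (agg, agg.saturating_sub(n_proofs))
    }

    fn main() {
        // With hypothetical sizes {1, 4, 8, 18}: 3 proofs go into the 4-proof
        // circuit with 1 padding proof; an oversized run falls back to the
        // largest circuit with no padding.
        assert_eq!(pick_agg_size(&[1, 4, 8, 18], 3), (4, 1));
        assert_eq!(pick_agg_size(&[1, 4, 8, 18], 20), (18, 0));
    }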
3 changes: 1 addition & 2 deletions core/bin/zksync_eth_sender/src/lib.rs
@@ -722,8 +722,7 @@ impl<ETH: EthereumInterface, DB: DatabaseInterface> ETHSender<ETH, DB> {
             } // not for eth sender
             AggregatedOperation::PublishProofBlocksOnchain(operation) => {
                 let args = operation.get_eth_tx_args();
-                self.ethereum
-                    .encode_tx_data("verifyCommitments", args.as_slice())
+                self.ethereum.encode_tx_data("proofBlocks", args.as_slice())
             }
             AggregatedOperation::ExecuteBlocks(operation) => {
                 let args = operation.get_eth_tx_args();
22 changes: 19 additions & 3 deletions core/bin/zksync_witness_generator/src/lib.rs
@@ -17,7 +17,9 @@ use zksync_prover_utils::api::{
     JobRequestData, JobResultData, ProverInputRequest, ProverInputResponse, ProverOutputRequest,
     WorkingOn,
 };
-use zksync_types::aggregated_operations::{AggregatedActionType, AggregatedOperation};
+use zksync_types::aggregated_operations::{
+    AggregatedActionType, AggregatedOperation, BlocksCreateProofOperation,
+};
 use zksync_types::prover::{
     ProverJobType, AGGREGATED_PROOF_JOB_PRIORITY, SINGLE_PROOF_JOB_PRIORITY,
 };
@@ -150,19 +152,29 @@ async fn publish(
     data: web::Data<AppState>,
     r: web::Json<ProverOutputRequest>,
 ) -> actix_web::Result<HttpResponse> {
-    log::info!("Received a proof for job: {}", r.job_id);
     let mut storage = data
         .access_storage()
         .await
         .map_err(actix_web::error::ErrorInternalServerError)?;
     let storage_result = match &r.data {
         JobResultData::BlockProof(single_proof) => {
+            log::info!(
+                "Received a proof for job: {}, single block: {}",
+                r.job_id,
+                r.first_block
+            );
             storage
                 .prover_schema()
                 .store_proof(r.job_id, r.first_block, single_proof)
                 .await
         }
         JobResultData::AggregatedBlockProof(aggregated_proof) => {
+            log::info!(
+                "Received a proof for job: {}, aggregated blocks: [{},{}]",
+                r.job_id,
+                r.first_block,
+                r.last_block
+            );
             storage
                 .prover_schema()
                 .store_aggregated_proof(r.job_id, r.first_block, r.last_block, aggregated_proof)
Expand Down Expand Up @@ -294,7 +306,11 @@ async fn update_prover_job_queue(storage: &mut StorageProcessor<'_>) -> anyhow::
                 next_aggregated_proof_block,
             )
             .await?;
-        if let Some(AggregatedOperation::CreateProofBlocks(blocks)) = create_block_proof_action {
+        if let Some(AggregatedOperation::CreateProofBlocks(BlocksCreateProofOperation {
+            blocks,
+            ..
+        })) = create_block_proof_action
+        {
             let first_block = *blocks.first().expect("should have 1 block");
             let last_block = *blocks.last().expect("should have 1 block");
             let mut data = Vec::new();
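Note on this change: CreateProofBlocks now carries a BlocksCreateProofOperation struct rather than a bare Vec of block numbers, so consumers here and in committer.rs destructure the blocks field and ignore proofs_to_pad with "..". A self-contained sketch of that pattern with minimal stand-in types:

    struct BlocksCreateProofOperation {
        blocks: Vec<u32>,
        proofs_to_pad: usize,
    }

    enum AggregatedOperation {
        CreateProofBlocks(BlocksCreateProofOperation),
    }

    fn job_range(op: &AggregatedOperation) -> (u32, u32) {
        // Bind blocks, ignore proofs_to_pad; the single-variant enum makes
        // this pattern irrefutable.
        let AggregatedOperation::CreateProofBlocks(BlocksCreateProofOperation {
            blocks, ..
        }) = op;
        (
            *blocks.first().expect("should have 1 block"),
            *blocks.last().expect("should have 1 block"),
        )
    }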
1 change: 1 addition & 0 deletions core/lib/prover_utils/Cargo.toml
@@ -21,3 +21,4 @@ log = "0.4"
 backoff = "0.1.6"
 reqwest = { version = "0.10.6", features = ["blocking"] }
 serde = "1.0"
+serde_json = "1.0"
15 changes: 15 additions & 0 deletions core/lib/prover_utils/src/fs_utils.rs
@@ -5,6 +5,7 @@ use std::io::BufReader;
 use std::path::PathBuf;
 use zksync_crypto::bellman::kate_commitment::{Crs, CrsForLagrangeForm, CrsForMonomialForm};
 use zksync_crypto::params::{account_tree_depth, balance_tree_depth};
+use zksync_crypto::proof::{AggregatedProof, SingleProof};
 use zksync_crypto::Engine;

 pub fn get_keys_root_dir() -> PathBuf {
@@ -97,3 +98,17 @@ pub fn get_recursive_verification_key_path(number_of_proofs: usize) -> PathBuf {
     key.push(&format!("recursive_{}.key", number_of_proofs));
     key
 }
+
+pub fn load_correct_aggregated_proof() -> anyhow::Result<AggregatedProof> {
+    let mut path = get_keys_root_dir();
+    path.push("zksync-aggregated-1.json");
+    let file = File::open(path)?;
+    Ok(serde_json::from_reader(file)?)
+}
+
+pub fn load_correct_single_proof() -> anyhow::Result<SingleProof> {
+    let mut path = get_keys_root_dir();
+    path.push("zksync-6-chunks.json");
+    let file = File::open(path)?;
+    Ok(serde_json::from_reader(file)?)
+}
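
Note on this change: these helpers deserialize pre-generated, known-valid proofs from JSON fixtures expected under the keys root directory; the dummy prover above patches their public inputs per block. A hedged usage sketch (field names follow the dummy_prover diff; error handling via anyhow as in the helpers):

    use zksync_prover_utils::fs_utils::{
        load_correct_aggregated_proof, load_correct_single_proof,
    };

    fn main() -> anyhow::Result<()> {
        // Fails fast if zksync-aggregated-1.json or zksync-6-chunks.json is
        // missing from the keys root dir.
        let aggregated = load_correct_aggregated_proof()?;
        let single = load_correct_single_proof()?;
        println!(
            "canned proofs loaded: {} aggregated vk inputs, {} single inputs",
            aggregated.individual_vk_inputs.len(),
            single.0.input_values.len()
        );
        Ok(())
    }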