diff --git a/Cargo.lock b/Cargo.lock index 7237381d16..bdbe356eb4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5654,6 +5654,7 @@ dependencies = [ "ethabi 12.0.0", "futures 0.3.6", "hex", + "lazy_static", "log 0.4.11", "num", "serde", diff --git a/core/bin/data_restore/src/main.rs b/core/bin/data_restore/src/main.rs index 9bb09e7dfe..1387679463 100644 --- a/core/bin/data_restore/src/main.rs +++ b/core/bin/data_restore/src/main.rs @@ -116,7 +116,7 @@ impl ContractsConfig { async fn main() { log::info!("Restoring zkSync state from the contract"); env_logger::init(); - let connection_pool = ConnectionPool::new(Some(1)).await; + let connection_pool = ConnectionPool::new(Some(1)); let config_opts = ConfigurationOptions::from_env(); let opt = Opt::from_args(); diff --git a/core/bin/server/examples/generate_exit_proof.rs b/core/bin/server/examples/generate_exit_proof.rs index 3a3fbb284b..4222c39409 100644 --- a/core/bin/server/examples/generate_exit_proof.rs +++ b/core/bin/server/examples/generate_exit_proof.rs @@ -45,7 +45,7 @@ async fn main() { let timer = Instant::now(); log::info!("Restoring state from db"); - let connection_pool = ConnectionPool::new(Some(1)).await; + let connection_pool = ConnectionPool::new(Some(1)); let mut storage = connection_pool .access_storage() .await diff --git a/core/bin/server/src/main.rs b/core/bin/server/src/main.rs index 7968330942..5523a096d6 100644 --- a/core/bin/server/src/main.rs +++ b/core/bin/server/src/main.rs @@ -44,7 +44,7 @@ async fn main() -> anyhow::Result<()> { // It's a `ServerCommand::Launch`, perform the usual routine. 
log::info!("Running the zkSync server"); - let connection_pool = ConnectionPool::new(None).await; + let connection_pool = ConnectionPool::new(None); let config_options = ConfigurationOptions::from_env(); let prover_options = ProverOptions::from_env(); diff --git a/core/bin/zksync_api/Cargo.toml b/core/bin/zksync_api/Cargo.toml index e327b13c4a..99e4b2cce2 100644 --- a/core/bin/zksync_api/Cargo.toml +++ b/core/bin/zksync_api/Cargo.toml @@ -10,6 +10,10 @@ keywords = ["blockchain", "zksync"] categories = ["cryptography"] publish = false # We don't want to publish our binaries. +[features] +default = [] +api_test = [] + [dependencies] zksync_types = { path = "../../lib/types", version = "1.0" } zksync_storage = { path = "../../lib/storage", version = "1.0" } @@ -53,8 +57,10 @@ thiserror = "1.0" futures01 = { package = "futures", version = "0.1" } reqwest = { version = "0.10", features = ["blocking", "json"] } tiny-keccak = "1.4.2" -async-trait = "0.1.31" +async-trait = "0.1" jsonwebtoken = "7" lru-cache = "0.1.2" +[dev-dependencies] +zksync_test_account = { path = "../../tests/test_account" } diff --git a/core/bin/zksync_api/src/api_server/mod.rs b/core/bin/zksync_api/src/api_server/mod.rs index 2ffbb542de..0cd311f9e2 100644 --- a/core/bin/zksync_api/src/api_server/mod.rs +++ b/core/bin/zksync_api/src/api_server/mod.rs @@ -4,6 +4,9 @@ //! `mod rpc_server` - JSON rpc via HTTP (for request reply functions) //! 
`mod rpc_subscriptions` - JSON rpc via WebSocket (for request reply functions and subscriptions) +// Public uses +pub use rest::v1; + // External uses use futures::channel::mpsc; // Workspace uses diff --git a/core/bin/zksync_api/src/api_server/rest/mod.rs b/core/bin/zksync_api/src/api_server/rest/mod.rs index 57c581b6c8..41b891321d 100644 --- a/core/bin/zksync_api/src/api_server/rest/mod.rs +++ b/core/bin/zksync_api/src/api_server/rest/mod.rs @@ -12,15 +12,25 @@ use self::v01::api_decl::ApiV01; mod helpers; mod v01; +pub mod v1; async fn start_server(api_v01: ApiV01, bind_to: SocketAddr) { let logger_format = crate::api_server::loggers::rest::get_logger_format(); + HttpServer::new(move || { let api_v01 = api_v01.clone(); + + let api_v1_scope = { + let pool = api_v01.connection_pool.clone(); + let env_options = api_v01.config_options.clone(); + v1::api_scope(pool, env_options) + }; + App::new() .wrap(middleware::Logger::new(&logger_format)) .wrap(Cors::new().send_wildcard().max_age(3600).finish()) .service(api_v01.into_scope()) + .service(api_v1_scope) // Endpoint needed for js isReachable .route( "/favicon.ico", diff --git a/core/bin/zksync_api/src/api_server/rest/v1/blocks.rs b/core/bin/zksync_api/src/api_server/rest/v1/blocks.rs new file mode 100644 index 0000000000..c930747e35 --- /dev/null +++ b/core/bin/zksync_api/src/api_server/rest/v1/blocks.rs @@ -0,0 +1,324 @@ +//! Blocks part of API implementation. 
+ +// Built-in uses + +// External uses +use actix_web::{ + web::{self, Json}, + Scope, +}; +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use serde_json::Value; + +// Workspace uses +use zksync_config::ConfigurationOptions; +use zksync_crypto::{convert::FeConvert, serialization::FrSerde, Fr}; +use zksync_storage::{chain::block::records, ConnectionPool, QueryResult}; +use zksync_types::{tx::TxHash, BlockNumber}; + +// Local uses +use super::{ + client::{self, Client}, + Error as ApiError, JsonResult, Pagination, PaginationQuery, +}; +use crate::{api_server::rest::helpers::remove_prefix, utils::shared_lru_cache::AsyncLruCache}; + +/// Shared data between `api/v1/blocks` endpoints. +#[derive(Debug, Clone)] +struct ApiBlocksData { + pool: ConnectionPool, + /// Verified blocks cache. + verified_blocks: AsyncLruCache, +} + +impl ApiBlocksData { + fn new(pool: ConnectionPool, capacity: usize) -> Self { + Self { + pool, + verified_blocks: AsyncLruCache::new(capacity), + } + } + + /// Returns information about block with the specified number. + /// + /// This method caches some of the verified blocks. + async fn block_info( + &self, + block_number: BlockNumber, + ) -> QueryResult> { + if let Some(block) = self.verified_blocks.get(&block_number).await { + return Ok(Some(block)); + } + + let blocks = self.blocks_range(Some(block_number), 1).await?; + if let Some(block) = blocks.into_iter().next() { + // Check if this is exactly the requested block. + if block.block_number != block_number as i64 { + return Ok(None); + } + + // It makes sense to store in cache only fully verified blocks. + if block.is_verified() { + self.verified_blocks + .insert(block_number, block.clone()) + .await; + } + Ok(Some(block)) + } else { + Ok(None) + } + } + + /// Returns the block range up to the given block number. 
+ /// + /// Note that this method doesn't use cache and always requests blocks from the database + async fn blocks_range( + &self, + max_block: Option, + limit: BlockNumber, + ) -> QueryResult> { + let max_block = max_block.unwrap_or(BlockNumber::MAX); + + let mut storage = self.pool.access_storage().await?; + storage + .chain() + .block_schema() + .load_block_range(max_block, limit) + .await + } + + /// Return transactions stored in the block with the specified number. + async fn block_transactions( + &self, + block_number: BlockNumber, + ) -> QueryResult> { + let mut storage = self.pool.access_storage().await?; + storage + .chain() + .block_schema() + .get_block_transactions(block_number) + .await + } +} + +// Data transfer objects. + +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] +pub struct BlockInfo { + pub block_number: BlockNumber, + #[serde(with = "FrSerde")] + pub new_state_root: Fr, + pub block_size: u64, + pub commit_tx_hash: Option, + pub verify_tx_hash: Option, + pub committed_at: DateTime, + pub verified_at: Option>, +} + +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] +pub struct TransactionInfo { + pub tx_hash: TxHash, + pub block_number: i64, + pub op: Value, + pub success: Option, + pub fail_reason: Option, + pub created_at: DateTime, +} + +impl From for BlockInfo { + fn from(inner: records::BlockDetails) -> Self { + Self { + block_number: inner.block_number as BlockNumber, + new_state_root: Fr::from_bytes(&inner.new_state_root) + .expect("Unable to decode `new_state_root` field"), + block_size: inner.block_size as u64, + commit_tx_hash: inner.commit_tx_hash.map(|bytes| { + TxHash::from_slice(&bytes).expect("Unable to decode `commit_tx_hash` field") + }), + verify_tx_hash: inner.verify_tx_hash.map(|bytes| { + TxHash::from_slice(&bytes).expect("Unable to decode `verify_tx_hash` field") + }), + committed_at: inner.committed_at, + verified_at: inner.verified_at, + } + } +} + +impl From for TransactionInfo { + fn from(inner: 
records::BlockTransactionItem) -> Self { + Self { + tx_hash: { + let mut slice = [0_u8; 32]; + + let tx_hex = remove_prefix(&inner.tx_hash); + hex::decode_to_slice(&tx_hex, &mut slice) + .expect("Unable to decode `tx_hash` field"); + TxHash::from_slice(&slice).unwrap() + }, + block_number: inner.block_number, + op: inner.op, + success: inner.success, + fail_reason: inner.fail_reason, + created_at: inner.created_at, + } + } +} + +// Client implementation + +/// Blocks API part. +impl Client { + /// Returns information about block with the specified number or null if block doesn't exist. + pub async fn block_by_id( + &self, + block_number: BlockNumber, + ) -> client::Result> { + self.get(&format!("blocks/{}", block_number)).send().await + } + + /// Returns information about transactions of the block with the specified number. + pub async fn block_transactions( + &self, + block_number: BlockNumber, + ) -> client::Result> { + self.get(&format!("blocks/{}/transactions", block_number)) + .send() + .await + } + + /// Returns information about several blocks in a range. + pub async fn blocks_range( + &self, + from: Pagination, + limit: BlockNumber, + ) -> client::Result> { + self.get("blocks") + .query(&from.into_query(limit)) + .send() + .await + } +} + +// Server implementation + +async fn block_by_id( + data: web::Data, + web::Path(block_number): web::Path, +) -> JsonResult> { + Ok(Json( + data.block_info(block_number) + .await + .map_err(ApiError::internal)? 
+ .map(BlockInfo::from), + )) +} + +async fn block_transactions( + data: web::Data, + web::Path(block_number): web::Path, +) -> JsonResult> { + let transactions = data + .block_transactions(block_number) + .await + .map_err(ApiError::internal)?; + + Ok(Json( + transactions + .into_iter() + .map(TransactionInfo::from) + .collect(), + )) +} + +async fn blocks_range( + data: web::Data, + web::Query(pagination): web::Query, +) -> JsonResult> { + let (pagination, limit) = pagination.into_inner()?; + let max = pagination.into_max(limit)?; + + let range = data + .blocks_range(max, limit) + .await + .map_err(ApiError::internal)?; + // Handle edge case when "after + limit" greater than the total blocks count. + // TODO Handle this case directly in the `storage` crate. (#1151) + let range = if let Pagination::After(after) = pagination { + range + .into_iter() + .filter(|block| block.block_number > after as i64) + .map(BlockInfo::from) + .collect() + } else { + range.into_iter().map(BlockInfo::from).collect() + }; + + Ok(Json(range)) +} + +pub fn api_scope(env_options: &ConfigurationOptions, pool: ConnectionPool) -> Scope { + let data = ApiBlocksData::new(pool, env_options.api_requests_caches_size); + + web::scope("blocks") + .data(data) + .route("", web::get().to(blocks_range)) + .route("{id}", web::get().to(block_by_id)) + .route("{id}/transactions", web::get().to(block_transactions)) +} + +#[cfg(test)] +mod tests { + use super::{super::test_utils::TestServerConfig, *}; + + #[actix_rt::test] + async fn test_blocks_scope() -> anyhow::Result<()> { + let cfg = TestServerConfig::default(); + cfg.fill_database().await?; + + let (client, server) = + cfg.start_server(|cfg| api_scope(&cfg.env_options, cfg.pool.clone())); + + // Block requests part + let blocks: Vec = { + let mut storage = cfg.pool.access_storage().await?; + + let blocks = storage + .chain() + .block_schema() + .load_block_range(10, 10) + .await?; + + blocks.into_iter().map(From::from).collect() + }; + + 
assert_eq!(client.block_by_id(1).await?.unwrap(), blocks[4]); + assert_eq!(client.blocks_range(Pagination::Last, 5).await?, blocks); + assert_eq!( + client.blocks_range(Pagination::Before(2), 5).await?, + &blocks[4..5] + ); + assert_eq!( + client.blocks_range(Pagination::After(4), 5).await?, + &blocks[0..1] + ); + + // Transaction requests part. + let expected_txs: Vec = { + let mut storage = cfg.pool.access_storage().await?; + + let transactions = storage + .chain() + .block_schema() + .get_block_transactions(1) + .await?; + + transactions.into_iter().map(From::from).collect() + }; + assert_eq!(client.block_transactions(1).await?, expected_txs); + assert_eq!(client.block_transactions(2).await?, vec![]); + + server.stop().await; + Ok(()) + } +} diff --git a/core/bin/zksync_api/src/api_server/rest/v1/client.rs b/core/bin/zksync_api/src/api_server/rest/v1/client.rs new file mode 100644 index 0000000000..103f9de22c --- /dev/null +++ b/core/bin/zksync_api/src/api_server/rest/v1/client.rs @@ -0,0 +1,108 @@ +//! Built-in API client. + +// Public uses +pub use super::blocks::{BlockInfo, TransactionInfo}; + +// Built-in uses + +// External uses +use reqwest::StatusCode; +use serde::{de::DeserializeOwned, ser::Serialize}; +use thiserror::Error; + +// Workspace uses + +// Local uses + +pub type Result = std::result::Result; + +// TODO Make error handling as correct as possible. (#1152) +#[derive(Debug, Error)] +pub enum ClientError { + #[error("Bad request: {0}")] + BadRequest(super::Error), + #[error("A parse JSON error occurred: {0}")] + Parse(reqwest::Error), + #[error("An other error occurred: {0}")] + Other(reqwest::Error), + #[error("Method {0} not found")] + NotFound(String), +} + +impl From for ClientError { + fn from(inner: reqwest::Error) -> Self { + Self::Other(inner) + } +} + +/// Client reference implementation for interacting with zkSync REST API v1. 
+#[derive(Debug, Clone)] +pub struct Client { + inner: reqwest::Client, + url: String, +} + +impl Client { + /// Creates a new REST API client with the specified Url. + pub fn new(url: String) -> Self { + Self { + inner: reqwest::Client::new(), + url, + } + } + + fn endpoint(&self, method: &str) -> String { + [&self.url, "/api/v1/", method].concat() + } + + /// Constructs get request for the specified method. + pub(crate) fn get(&self, method: impl AsRef) -> ClientRequestBuilder { + let url = self.endpoint(method.as_ref()); + ClientRequestBuilder { + inner: self.inner.get(&url), + url, + } + } +} + +/// API specific wrapper over the `reqwest::RequestBuilder`. +#[derive(Debug)] +pub struct ClientRequestBuilder { + inner: reqwest::RequestBuilder, + url: String, +} + +impl ClientRequestBuilder { + /// Modify the query string of the URL. + /// + /// See [reqwest] documentation for details + /// + /// [reqwest]: https://docs.rs/reqwest/latest/reqwest/struct.RequestBuilder.html#method.query + pub fn query(self, query: &Q) -> Self { + Self { + inner: self.inner.query(query), + url: self.url, + } + } + + /// Constructs the Request and sends it to the target URL, returning a future Response. + /// + /// This method takes account of the responses structure and the error handling specific. + pub async fn send(self) -> self::Result { + let response = self.inner.send().await?; + + let status = response.status(); + if status.is_success() { + Ok(response.json().await.map_err(ClientError::Parse)?) 
+ } else { + if status == StatusCode::NOT_FOUND { + return Err(ClientError::NotFound(self.url)); + } + + Err(ClientError::BadRequest(super::Error { + http_code: status, + body: response.json().await.map_err(ClientError::Parse)?, + })) + } + } +} diff --git a/core/bin/zksync_api/src/api_server/rest/v1/config.rs b/core/bin/zksync_api/src/api_server/rest/v1/config.rs new file mode 100644 index 0000000000..6ac5bd8dab --- /dev/null +++ b/core/bin/zksync_api/src/api_server/rest/v1/config.rs @@ -0,0 +1,116 @@ +//! Config part of API implementation. + +// Built-in uses + +// External uses +use actix_web::{web, Scope}; +use serde::{Deserialize, Serialize}; + +// Workspace uses +use zksync_config::ConfigurationOptions; +use zksync_types::{network::Network, Address}; + +// Local uses +use super::{ + client::{self, Client}, + Json, +}; + +/// Shared data between `api/v1/config` endpoints. +#[derive(Debug, Clone)] +struct ApiConfigData { + contract_address: Address, + deposit_confirmations: u64, + network: Network, +} + +impl ApiConfigData { + fn new(env_options: &ConfigurationOptions) -> Self { + Self { + contract_address: env_options.contract_eth_addr, + deposit_confirmations: env_options.confirmations_for_eth_event, + network: env_options.eth_network.parse().unwrap(), + } + } +} + +// Data transfer objects. + +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] +pub struct Contracts { + pub contract: Address, +} + +// Client implementation + +/// Configuration API part. 
+impl Client { + pub async fn contracts(&self) -> client::Result { + self.get("config/contracts").send().await + } + + pub async fn deposit_confirmations(&self) -> client::Result { + self.get("config/deposit_confirmations").send().await + } + + pub async fn network(&self) -> client::Result { + self.get("config/network").send().await + } +} + +// Server implementation + +async fn contracts(data: web::Data) -> Json { + Json(Contracts { + contract: data.contract_address, + }) +} + +async fn deposit_confirmations(data: web::Data) -> Json { + Json(data.deposit_confirmations) +} + +async fn network(data: web::Data) -> Json { + Json(data.network) +} + +pub fn api_scope(env_options: &ConfigurationOptions) -> Scope { + let data = ApiConfigData::new(env_options); + + web::scope("config") + .data(data) + .route("contracts", web::get().to(contracts)) + .route("network", web::get().to(network)) + .route( + "deposit_confirmations", + web::get().to(deposit_confirmations), + ) +} + +#[cfg(test)] +mod tests { + use super::{super::test_utils::TestServerConfig, *}; + + #[actix_rt::test] + async fn test_config_scope() -> anyhow::Result<()> { + let cfg = TestServerConfig::default(); + let (client, server) = cfg.start_server(|cfg| api_scope(&cfg.env_options)); + + assert_eq!( + client.deposit_confirmations().await?, + cfg.env_options.confirmations_for_eth_event + ); + + assert_eq!(client.network().await?, cfg.env_options.eth_network); + assert_eq!( + client.contracts().await?, + Contracts { + contract: cfg.env_options.contract_eth_addr + }, + ); + + server.stop().await; + + Ok(()) + } +} diff --git a/core/bin/zksync_api/src/api_server/rest/v1/error.rs b/core/bin/zksync_api/src/api_server/rest/v1/error.rs new file mode 100644 index 0000000000..18841b7832 --- /dev/null +++ b/core/bin/zksync_api/src/api_server/rest/v1/error.rs @@ -0,0 +1,108 @@ +// Built-in uses +use std::fmt::{self, Display}; + +// External uses +use actix_web::{dev::Body, http::HeaderValue, HttpResponse, ResponseError}; 
+use reqwest::{header::CONTENT_TYPE, StatusCode}; +use serde::{Deserialize, Serialize}; + +// Workspace uses + +// Local uses + +/// The error body that is returned in the response content. +#[derive(Debug, Serialize, Deserialize, Default)] +pub struct ErrorBody { + /// A URI reference that identifies the problem type. + #[serde(rename = "type", default, skip_serializing_if = "String::is_empty")] + pub docs_uri: String, + /// A short, human-readable summary of the problem. + #[serde(default, skip_serializing_if = "String::is_empty")] + pub title: String, + /// A human-readable explanation specific to this occurrence of the problem. + #[serde(default, skip_serializing_if = "String::is_empty")] + pub detail: String, + /// Error location in the source code. + #[serde(default, skip_serializing_if = "String::is_empty")] + pub location: String, + /// Internal error code. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub code: Option, +} + +/// An HTTP error structure. +#[derive(Debug)] +pub struct Error { + /// HTTP error code. + pub http_code: StatusCode, + /// HTTP error content serialized into JSON. + pub body: ErrorBody, +} + +impl Error { + /// Creates a new Error with the BAD_REQUEST (400) status code. + pub fn bad_request(title: impl Display) -> Self { + Self { + http_code: StatusCode::BAD_REQUEST, + body: ErrorBody { + title: title.to_string(), + ..ErrorBody::default() + }, + } + } + + /// Creates a new Error with the INTERNAL_SERVER_ERROR (500) status code. + pub fn internal(title: impl Display) -> Self { + Self { + http_code: StatusCode::INTERNAL_SERVER_ERROR, + body: ErrorBody { + title: title.to_string(), + ..ErrorBody::default() + }, + } + } + + /// Sets error title. + pub fn title(mut self, title: impl Display) -> Self { + self.body.title = title.to_string(); + self + } + + /// Sets error details. 
+ pub fn detail(mut self, detail: impl Display) -> Self { + self.body.detail = detail.to_string(); + self + } +} + +impl Display for ErrorBody { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(&self.title) + } +} + +impl Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{} ({})", self.body, self.http_code) + } +} + +impl ResponseError for Error { + fn status_code(&self) -> reqwest::StatusCode { + self.http_code + } + + fn error_response(&self) -> actix_web::HttpResponse { + let mut resp = HttpResponse::new(self.status_code()); + + match serde_json::to_vec_pretty(&self.body) { + Ok(body) => { + resp.headers_mut() + .insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); + resp.set_body(Body::from_slice(&body)) + } + + Err(err) => err.error_response(), + } + } +} diff --git a/core/bin/zksync_api/src/api_server/rest/v1/mod.rs b/core/bin/zksync_api/src/api_server/rest/v1/mod.rs new file mode 100644 index 0000000000..ae65ec60ab --- /dev/null +++ b/core/bin/zksync_api/src/api_server/rest/v1/mod.rs @@ -0,0 +1,166 @@ +//! First stable API implementation. + +// Public uses +pub use self::error::{Error, ErrorBody}; + +// Built-in uses + +// External uses +use actix_web::{ + web::{self, Json}, + Scope, +}; +use serde::{Deserialize, Serialize}; + +// Workspace uses +use zksync_config::ConfigurationOptions; +use zksync_storage::ConnectionPool; +use zksync_types::BlockNumber; + +// Local uses + +mod blocks; +pub mod client; +mod config; +mod error; +#[cfg(test)] +mod test_utils; + +/// Maximum limit value in the requests. 
+pub const MAX_LIMIT: u32 = 100; + +type JsonResult = std::result::Result, Error>; + +pub(crate) fn api_scope(pool: ConnectionPool, env_options: ConfigurationOptions) -> Scope { + web::scope("/api/v1") + .service(config::api_scope(&env_options)) + .service(blocks::api_scope(&env_options, pool)) +} + +/// Internal pagination query representation in according to spec: +/// +/// `?limit=..&[before={id}|after={id}]` where: +/// +/// - `limit` parameter is required +/// - if `before=#id` is set; returns `limit` objects before object with `id` (not including `id`) +/// - if `after=#id` is set; returns `limit` objects after object with `id` (not including `id`) +/// - if neither is set; returns last `limit` objects +#[derive(Debug, Serialize, Deserialize, Copy, Clone, PartialEq, Default)] +struct PaginationQuery { + before: Option, + after: Option, + limit: BlockNumber, +} + +/// Pagination request parameter. +/// +/// Used together with the limit parameter to perform pagination. +#[derive(Debug, Serialize, Deserialize, Copy, Clone, PartialEq)] +pub enum Pagination { + /// Request to return some items before specified (not including itself). + Before(BlockNumber), + /// Request to return some items after specified (not including itself) + After(BlockNumber), + /// Request to return some last items. + Last, +} + +impl PaginationQuery { + /// Parses the original query into a pair `(pagination, limit)`. 
+ fn into_inner(self) -> Result<(Pagination, BlockNumber), Error> { + let (pagination, limit) = match self { + Self { + before: Some(before), + after: None, + limit, + } => Ok((Pagination::Before(before), limit)), + + Self { + before: None, + after: Some(after), + limit, + } => Ok((Pagination::After(after), limit)), + + Self { + before: None, + after: None, + limit, + } => Ok((Pagination::Last, limit)), + + _ => Err(Error::bad_request("Incorrect pagination query") + .detail("Pagination query contains both `before` and `after` values.")), + }?; + + if limit == 0 { + return Err(Error::bad_request("Incorrect pagination query") + .detail("Limit should be greater than zero")); + } + + if limit > MAX_LIMIT { + return Err(Error::bad_request("Incorrect pagination query") + .detail(format!("Limit should be lower than {}", MAX_LIMIT))); + } + + Ok((pagination, limit)) + } +} + +impl Pagination { + /// Converts `(pagination, limit)` pair into the `(max, limit)` pair to perform database queries. + /// + /// # Panics + /// + /// - if limit is zero. + fn into_max(self, limit: BlockNumber) -> Result, Error> { + assert!(limit > 0, "Limit should be greater than zero"); + + match self { + Pagination::Before(before) => { + if before < 1 { + return Err(Error::bad_request("Incorrect pagination query") + .detail("Before should be greater than zero")); + } + + Ok(Some(before - 1)) + } + Pagination::After(after) => Ok(Some(after + limit + 1)), + Pagination::Last => Ok(None), + } + } + + /// Converts `(pagination, limit)` pair into the query. 
+ fn into_query(self, limit: BlockNumber) -> PaginationQuery { + match self { + Pagination::Before(before) => PaginationQuery { + before: Some(before), + limit, + ..PaginationQuery::default() + }, + Pagination::After(after) => PaginationQuery { + after: Some(after), + limit, + ..PaginationQuery::default() + }, + Pagination::Last => PaginationQuery { + limit, + ..PaginationQuery::default() + }, + } + } +} + +#[test] +fn pagination_before_max_limit() { + let pagination = Pagination::Before(10); + + let max = pagination.into_max(10).unwrap(); + assert_eq!(max, Some(9)) +} + +#[test] +fn pagination_after_max_limit() { + let pagination = Pagination::After(10); + + let max = pagination.into_max(10).unwrap(); + assert_eq!(max, Some(21)) +} diff --git a/core/bin/zksync_api/src/api_server/rest/v1/test_utils.rs b/core/bin/zksync_api/src/api_server/rest/v1/test_utils.rs new file mode 100644 index 0000000000..3bd62a61a4 --- /dev/null +++ b/core/bin/zksync_api/src/api_server/rest/v1/test_utils.rs @@ -0,0 +1,246 @@ +//! API testing helpers. 
+ +// Built-in uses + +// External uses +use actix_web::{web, App, Scope}; + +// Workspace uses +use zksync_config::ConfigurationOptions; +use zksync_crypto::rand::{SeedableRng, XorShiftRng}; +use zksync_storage::test_data::{ + dummy_ethereum_tx_hash, gen_acc_random_updates, gen_unique_operation, + gen_unique_operation_with_txs, BLOCK_SIZE_CHUNKS, +}; +use zksync_storage::ConnectionPool; +use zksync_test_account::ZkSyncAccount; +use zksync_types::{ethereum::OperationType, helpers::apply_updates, AccountMap, Action}; +use zksync_types::{ + operations::{ChangePubKeyOp, TransferToNewOp}, + ExecutedOperations, ExecutedTx, ZkSyncOp, ZkSyncTx, +}; + +// Local uses +use super::client::Client; + +#[derive(Debug, Clone)] +pub struct TestServerConfig { + pub env_options: ConfigurationOptions, + pub pool: ConnectionPool, +} + +impl Default for TestServerConfig { + fn default() -> Self { + Self { + env_options: ConfigurationOptions::from_env(), + pool: ConnectionPool::new(Some(1)), + } + } +} + +impl TestServerConfig { + pub fn start_server(&self, scope_factory: F) -> (Client, actix_web::test::TestServer) + where + F: Fn(&TestServerConfig) -> Scope + Clone + Send + 'static, + { + let this = self.clone(); + let server = actix_web::test::start(move || { + App::new().service(web::scope("/api/v1").service(scope_factory(&this))) + }); + + let mut url = server.url(""); + url.pop(); // Pop last '/' symbol. + + let client = Client::new(url); + (client, server) + } + + /// Creates several transactions and the corresponding executed operations. 
+ fn gen_zk_txs() -> Vec<(ZkSyncTx, ExecutedOperations)> { + let from = ZkSyncAccount::rand(); + from.set_account_id(Some(0xdead)); + + let to = ZkSyncAccount::rand(); + to.set_account_id(Some(0xf00d)); + + let mut txs = Vec::new(); + + // Sign change pubkey tx pair + { + let tx = from.sign_change_pubkey_tx(None, false, 0, 0_u64.into(), false); + + let zksync_op = ZkSyncOp::ChangePubKeyOffchain(Box::new(ChangePubKeyOp { + tx: tx.clone(), + account_id: from.get_account_id().unwrap(), + })); + + let executed_tx = ExecutedTx { + signed_tx: zksync_op.try_get_tx().unwrap().into(), + success: true, + op: Some(zksync_op), + fail_reason: None, + block_index: None, + created_at: chrono::Utc::now(), + batch_id: None, + }; + + txs.push(( + ZkSyncTx::ChangePubKey(Box::new(tx)), + ExecutedOperations::Tx(Box::new(executed_tx)), + )); + } + // Transfer tx pair + { + let tx = from + .sign_transfer( + 0, + "ETH", + 1_u64.into(), + 0_u64.into(), + &to.address, + None, + false, + ) + .0; + + let zksync_op = ZkSyncOp::TransferToNew(Box::new(TransferToNewOp { + tx: tx.clone(), + from: from.get_account_id().unwrap(), + to: to.get_account_id().unwrap(), + })); + + let executed_tx = ExecutedTx { + signed_tx: zksync_op.try_get_tx().unwrap().into(), + success: true, + op: Some(zksync_op), + fail_reason: None, + block_index: None, + created_at: chrono::Utc::now(), + batch_id: None, + }; + + txs.push(( + ZkSyncTx::Transfer(Box::new(tx)), + ExecutedOperations::Tx(Box::new(executed_tx)), + )); + } + + txs + } + + pub async fn fill_database(&self) -> anyhow::Result<()> { + let mut storage = self.pool.access_storage().await?; + + // Check if database is been already inited. + if storage.chain().block_schema().get_block(1).await?.is_some() { + return Ok(()); + } + + // Below lies the initialization of the data for the test. + let mut rng = XorShiftRng::from_seed([0, 1, 2, 3]); + + // Required since we use `EthereumSchema` in this test. 
+ storage.ethereum_schema().initialize_eth_data().await?; + + let mut accounts = AccountMap::default(); + let n_committed = 5; + let n_verified = n_committed - 2; + + // Create and apply several blocks to work with. + for block_number in 1..=n_committed { + let updates = (0..3) + .map(|_| gen_acc_random_updates(&mut rng)) + .flatten() + .collect::>(); + apply_updates(&mut accounts, updates.clone()); + + // Add transactions to every odd block. + let txs = if block_number % 2 == 1 { + Self::gen_zk_txs().into_iter().map(|(_tx, op)| op).collect() + } else { + vec![] + }; + + // Store the operation in the block schema. + let operation = storage + .chain() + .block_schema() + .execute_operation(gen_unique_operation_with_txs( + block_number, + Action::Commit, + BLOCK_SIZE_CHUNKS, + txs, + )) + .await?; + storage + .chain() + .state_schema() + .commit_state_update(block_number, &updates, 0) + .await?; + + // Store & confirm the operation in the ethereum schema, as it's used for obtaining + // commit/verify hashes. + let ethereum_op_id = operation.id.unwrap() as i64; + let eth_tx_hash = dummy_ethereum_tx_hash(ethereum_op_id); + let response = storage + .ethereum_schema() + .save_new_eth_tx( + OperationType::Commit, + Some(ethereum_op_id), + 100, + 100u32.into(), + Default::default(), + ) + .await?; + storage + .ethereum_schema() + .add_hash_entry(response.id, ð_tx_hash) + .await?; + storage + .ethereum_schema() + .confirm_eth_tx(ð_tx_hash) + .await?; + + // Add verification for the block if required. 
+ if block_number <= n_verified { + storage + .prover_schema() + .store_proof(block_number, &Default::default()) + .await?; + let operation = storage + .chain() + .block_schema() + .execute_operation(gen_unique_operation( + block_number, + Action::Verify { + proof: Default::default(), + }, + BLOCK_SIZE_CHUNKS, + )) + .await?; + + let ethereum_op_id = operation.id.unwrap() as i64; + let eth_tx_hash = dummy_ethereum_tx_hash(ethereum_op_id); + let response = storage + .ethereum_schema() + .save_new_eth_tx( + OperationType::Verify, + Some(ethereum_op_id), + 100, + 100u32.into(), + Default::default(), + ) + .await?; + storage + .ethereum_schema() + .add_hash_entry(response.id, ð_tx_hash) + .await?; + storage + .ethereum_schema() + .confirm_eth_tx(ð_tx_hash) + .await?; + } + } + Ok(()) + } +} diff --git a/core/bin/zksync_api/src/api_server/rpc_server/mod.rs b/core/bin/zksync_api/src/api_server/rpc_server/mod.rs index e693901605..155d93019d 100644 --- a/core/bin/zksync_api/src/api_server/rpc_server/mod.rs +++ b/core/bin/zksync_api/src/api_server/rpc_server/mod.rs @@ -144,18 +144,20 @@ impl RpcApp { /// Returns a message that user has to sign to send the transaction. /// If the transaction doesn't need a message signature, returns `None`. /// If any error is encountered during the message generation, returns `jsonrpc_core::Error`. 
- async fn get_tx_info_message_to_sign(&self, tx: &ZkSyncTx) -> Result> { + async fn get_tx_info_message_to_sign(&self, tx: &ZkSyncTx) -> Result>> { match tx { ZkSyncTx::Transfer(tx) => { let token = self.token_info_from_id(tx.token).await?; Ok(Some( - tx.get_ethereum_sign_message(&token.symbol, token.decimals), + tx.get_ethereum_sign_message(&token.symbol, token.decimals) + .into_bytes(), )) } ZkSyncTx::Withdraw(tx) => { let token = self.token_info_from_id(tx.token).await?; Ok(Some( - tx.get_ethereum_sign_message(&token.symbol, token.decimals), + tx.get_ethereum_sign_message(&token.symbol, token.decimals) + .into_bytes(), )) } _ => Ok(None), @@ -539,7 +541,7 @@ async fn send_verify_request_and_recv( async fn verify_tx_info_message_signature( tx: &ZkSyncTx, signature: Option, - msg_to_sign: Option, + msg_to_sign: Option>, req_channel: mpsc::Sender, ) -> Result { let eth_sign_data = match msg_to_sign { @@ -574,7 +576,7 @@ async fn verify_tx_info_message_signature( async fn verify_txs_batch_signature( batch: Vec, signature: TxEthSignature, - msgs_to_sign: Vec>, + msgs_to_sign: Vec>>, req_channel: mpsc::Sender, ) -> Result { let mut txs = Vec::with_capacity(batch.len()); @@ -592,12 +594,13 @@ async fn verify_txs_batch_signature( }); } // User is expected to sign hash of the data of all transactions in the batch. 
- let message = hex::encode(tiny_keccak::keccak256( + let message = tiny_keccak::keccak256( txs.iter() .flat_map(|tx| tx.tx.get_bytes()) .collect::>() .as_slice(), - )); + ) + .to_vec(); let eth_sign_data = EthSignData { signature, message }; let (sender, receiever) = oneshot::channel(); diff --git a/core/bin/zksync_api/src/eth_checker.rs b/core/bin/zksync_api/src/eth_checker.rs index 84574c5138..92df758991 100644 --- a/core/bin/zksync_api/src/eth_checker.rs +++ b/core/bin/zksync_api/src/eth_checker.rs @@ -45,22 +45,31 @@ impl EthereumChecker { pub async fn is_eip1271_signature_correct( &self, address: Address, - message: Vec, + message: &[u8], signature: EIP1271Signature, ) -> Result { - let hash = tiny_keccak::keccak256(&message); + // sign_message = keccak256("\x19Ethereum Signed Message:\n32" + keccak256(message)) + let sign_message = { + let hash = tiny_keccak::keccak256(&message); + let prefix = format!("\x19Ethereum Signed Message:\n{}", hash.len()); + let mut bytes = Vec::with_capacity(prefix.len() + hash.len()); + bytes.extend_from_slice(prefix.as_bytes()); + bytes.extend_from_slice(&hash); + + tiny_keccak::keccak256(&bytes) + }; let received: [u8; 4] = self .get_eip1271_contract(address) .query( "isValidSignature", - (hash, signature.0), + (sign_message, signature.0), None, Options::default(), None, ) .await - .map_err(|e| anyhow::format_err!("Failed to query contract isValidSignature: {}", e))?; + .unwrap_or_default(); Ok(received == EIP1271_SUCCESS_RETURN_VALUE) } diff --git a/core/bin/zksync_api/src/main.rs b/core/bin/zksync_api/src/main.rs index 233488f4bb..c16f8205f1 100644 --- a/core/bin/zksync_api/src/main.rs +++ b/core/bin/zksync_api/src/main.rs @@ -16,7 +16,7 @@ async fn main() -> anyhow::Result<()> { }) .expect("Error setting Ctrl+C handler"); } - let connection_pool = ConnectionPool::new(None).await; + let connection_pool = ConnectionPool::new(None); let task_handle = run_api(connection_pool, stop_signal_sender); diff --git 
a/core/bin/zksync_api/src/signature_checker.rs b/core/bin/zksync_api/src/signature_checker.rs index d10037271e..7da9a8aa49 100644 --- a/core/bin/zksync_api/src/signature_checker.rs +++ b/core/bin/zksync_api/src/signature_checker.rs @@ -147,7 +147,7 @@ async fn verify_eth_signature_single_tx( match &sign_data.signature { TxEthSignature::EthereumSignature(packed_signature) => { let signer_account = packed_signature - .signature_recover_signer(sign_data.message.as_bytes()) + .signature_recover_signer(&sign_data.message) .or(Err(TxAddError::IncorrectEthSignature))?; if signer_account != tx.tx.account() { @@ -155,16 +155,10 @@ async fn verify_eth_signature_single_tx( } } TxEthSignature::EIP1271Signature(signature) => { - let message = format!( - "\x19Ethereum Signed Message:\n{}{}", - sign_data.message.len(), - &sign_data.message - ); - let signature_correct = eth_checker .is_eip1271_signature_correct( tx.tx.account(), - message.into_bytes(), + &sign_data.message, signature.clone(), ) .await @@ -188,7 +182,7 @@ async fn verify_eth_signature_txs_batch( match ð_sign_data.signature { TxEthSignature::EthereumSignature(packed_signature) => { let signer_account = packed_signature - .signature_recover_signer(ð_sign_data.message.as_bytes()) + .signature_recover_signer(ð_sign_data.message) .or(Err(TxAddError::IncorrectEthSignature))?; if txs.iter().any(|tx| tx.tx.account() != signer_account) { @@ -196,18 +190,11 @@ async fn verify_eth_signature_txs_batch( } } TxEthSignature::EIP1271Signature(signature) => { - // Prefix the message. 
- let message = format!( - "\x19Ethereum Signed Message:\n{}{}", - eth_sign_data.message.len(), - ð_sign_data.message - ); - for tx in txs { let signature_correct = eth_checker .is_eip1271_signature_correct( tx.tx.account(), - message.as_bytes().to_vec(), + ð_sign_data.message, signature.clone(), ) .await diff --git a/core/bin/zksync_api/src/utils/shared_lru_cache.rs b/core/bin/zksync_api/src/utils/shared_lru_cache.rs index 2374fa4a55..f2a80dbd74 100644 --- a/core/bin/zksync_api/src/utils/shared_lru_cache.rs +++ b/core/bin/zksync_api/src/utils/shared_lru_cache.rs @@ -1,6 +1,7 @@ use lru_cache::LruCache; use std::hash::Hash; use std::sync::{Arc, Mutex}; +use tokio::sync::Mutex as TokioMutex; /// `SharedLruCache` is an thread-safe alternative of the `LruCache`. /// Unlike the `LruCache`, getter method returns a cloned value instead of the reference to @@ -24,3 +25,26 @@ impl SharedLruCache { self.0.lock().unwrap().get_mut(&key).cloned() } } + +/// `AsyncLruCache` is an thread-safe alternative of the `LruCache`. +/// Unlike the `LruCache`, getter method returns a cloned value instead of the reference to +/// fulfill the thread safety requirements. +/// +/// Note that this structure uses `tokio::sync::Mutex` internally, so it is not recommended to use it in +/// single-threaded environment. 
+#[derive(Clone, Debug)] +pub struct AsyncLruCache(Arc>>); + +impl AsyncLruCache { + pub fn new(capacity: usize) -> Self { + Self(Arc::new(TokioMutex::new(LruCache::new(capacity)))) + } + + pub async fn insert(&self, key: K, value: V) { + self.0.lock().await.insert(key, value); + } + + pub async fn get(&self, key: &K) -> Option { + self.0.lock().await.get_mut(&key).cloned() + } +} diff --git a/core/bin/zksync_core/src/bin/eth_watcher.rs b/core/bin/zksync_core/src/bin/eth_watcher.rs index f3e21434a4..db0e48b9c1 100644 --- a/core/bin/zksync_core/src/bin/eth_watcher.rs +++ b/core/bin/zksync_core/src/bin/eth_watcher.rs @@ -19,7 +19,7 @@ fn main() { let (eth_req_sender, eth_req_receiver) = mpsc::channel(256); - let db_pool = main_runtime.block_on(async { ConnectionPool::new(None).await }); + let db_pool = ConnectionPool::new(None); let watcher = EthWatch::new(web3, contract_address, 0, eth_req_receiver, db_pool); diff --git a/core/bin/zksync_core/src/eth_watch/mod.rs b/core/bin/zksync_core/src/eth_watch/mod.rs index d049b2f1f1..7b61e368c0 100644 --- a/core/bin/zksync_core/src/eth_watch/mod.rs +++ b/core/bin/zksync_core/src/eth_watch/mod.rs @@ -35,10 +35,6 @@ use zksync_types::{ // Local deps use self::{eth_state::ETHState, received_ops::sift_outdated_ops}; -/// isValidSignature return value according to EIP1271 standard -/// bytes4(keccak256("isValidSignature(bytes32,bytes)") -pub const EIP1271_SUCCESS_RETURN_VALUE: [u8; 4] = [0x20, 0xc1, 0x3b, 0x0b]; - mod eth_state; mod received_ops; diff --git a/core/bin/zksync_core/src/lib.rs b/core/bin/zksync_core/src/lib.rs index 8dba097fc4..d6e56d6e9d 100644 --- a/core/bin/zksync_core/src/lib.rs +++ b/core/bin/zksync_core/src/lib.rs @@ -92,7 +92,7 @@ pub async fn wait_for_tasks(task_futures: Vec>) { /// Inserts the initial information about zkSync tokens into the database. 
pub async fn genesis_init() { - let pool = ConnectionPool::new(Some(1)).await; + let pool = ConnectionPool::new(Some(1)); let config_options = ConfigurationOptions::from_env(); log::info!("Generating genesis block."); diff --git a/core/bin/zksync_core/src/main.rs b/core/bin/zksync_core/src/main.rs index 5071f0f5a0..cefae30df5 100644 --- a/core/bin/zksync_core/src/main.rs +++ b/core/bin/zksync_core/src/main.rs @@ -16,7 +16,7 @@ async fn main() -> anyhow::Result<()> { }) .expect("Error setting Ctrl+C handler"); } - let connection_pool = ConnectionPool::new(None).await; + let connection_pool = ConnectionPool::new(None); let task_handles = run_core(connection_pool, stop_signal_sender) .await diff --git a/core/bin/zksync_eth_sender/Cargo.toml b/core/bin/zksync_eth_sender/Cargo.toml index 10979ea3a4..f4a97552a7 100644 --- a/core/bin/zksync_eth_sender/Cargo.toml +++ b/core/bin/zksync_eth_sender/Cargo.toml @@ -39,3 +39,4 @@ async-trait = "0.1.31" [dev-dependencies] chrono = { version = "0.4", features = ["serde"] } +lazy_static = "1.4.0" diff --git a/core/bin/zksync_eth_sender/src/database.rs b/core/bin/zksync_eth_sender/src/database.rs index 5ecc82d87f..ed7a8da1e3 100644 --- a/core/bin/zksync_eth_sender/src/database.rs +++ b/core/bin/zksync_eth_sender/src/database.rs @@ -18,6 +18,87 @@ use zksync_types::{ // Local uses use super::transactions::ETHStats; +/// Abstract database access trait, optimized for the needs of `ETHSender`. +#[async_trait::async_trait] +pub(super) trait DatabaseInterface { + /// Returns connection to the database. + async fn acquire_connection(&self) -> anyhow::Result>; + + /// Loads the unconfirmed and unprocessed operations from the database. + /// Unconfirmed operations are Ethereum operations that were started, but not confirmed yet. + /// Unprocessed operations are zkSync operations that were not started at all. 
+ async fn restore_state( + &self, + connection: &mut StorageProcessor<'_>, + ) -> anyhow::Result<(VecDeque, Vec)>; + + /// Loads the unprocessed operations from the database. + /// Unprocessed operations are zkSync operations that were not started at all. + async fn load_new_operations( + &self, + connection: &mut StorageProcessor<'_>, + ) -> anyhow::Result>; + + /// Saves a new unconfirmed operation to the database. + async fn save_new_eth_tx( + &self, + connection: &mut StorageProcessor<'_>, + op_type: OperationType, + op: Option, + deadline_block: i64, + used_gas_price: U256, + raw_tx: Vec, + ) -> anyhow::Result; + + /// Adds a tx hash entry associated with some Ethereum operation to the database. + async fn add_hash_entry( + &self, + connection: &mut StorageProcessor<'_>, + eth_op_id: i64, + hash: &H256, + ) -> anyhow::Result<()>; + + /// Adds a new tx info to the previously started Ethereum operation. + async fn update_eth_tx( + &self, + connection: &mut StorageProcessor<'_>, + eth_op_id: EthOpId, + new_deadline_block: i64, + new_gas_value: U256, + ) -> anyhow::Result<()>; + + /// Marks an operation as completed in the database. + async fn confirm_operation( + &self, + connection: &mut StorageProcessor<'_>, + hash: &H256, + op: ÐOperation, + ) -> anyhow::Result<()>; + + /// Loads the stored Ethereum operations stats. + async fn load_stats(&self, connection: &mut StorageProcessor<'_>) -> anyhow::Result; + + /// Loads the stored gas price limit. + async fn load_gas_price_limit( + &self, + connection: &mut StorageProcessor<'_>, + ) -> anyhow::Result; + + /// Updates the stored gas price limit. + async fn update_gas_price_params( + &self, + connection: &mut StorageProcessor<'_>, + gas_price_limit: U256, + average_gas_price: U256, + ) -> anyhow::Result<()>; + + async fn is_previous_operation_confirmed( + &self, + connection: &mut StorageProcessor<'_>, + op: ÐOperation, + ) -> anyhow::Result; +} + /// The actual database wrapper. 
/// This structure uses `StorageProcessor` to interact with an existing database. #[derive(Debug)] @@ -32,14 +113,15 @@ impl Database { } } -impl Database { - pub async fn acquire_connection(&self) -> anyhow::Result> { +#[async_trait::async_trait] +impl DatabaseInterface for Database { + async fn acquire_connection(&self) -> anyhow::Result> { let connection = self.db_pool.access_storage().await?; Ok(connection) } - pub async fn restore_state( + async fn restore_state( &self, connection: &mut StorageProcessor<'_>, ) -> anyhow::Result<(VecDeque, Vec)> { @@ -54,7 +136,7 @@ impl Database { Ok((unconfirmed_ops, unprocessed_ops)) } - pub async fn load_new_operations( + async fn load_new_operations( &self, connection: &mut StorageProcessor<'_>, ) -> anyhow::Result> { @@ -65,7 +147,7 @@ impl Database { Ok(unprocessed_ops) } - pub async fn save_new_eth_tx( + async fn save_new_eth_tx( &self, connection: &mut StorageProcessor<'_>, op_type: OperationType, @@ -88,7 +170,7 @@ impl Database { Ok(result) } - pub async fn add_hash_entry( + async fn add_hash_entry( &self, connection: &mut StorageProcessor<'_>, eth_op_id: i64, @@ -100,7 +182,7 @@ impl Database { .await?) } - pub async fn update_eth_tx( + async fn update_eth_tx( &self, connection: &mut StorageProcessor<'_>, eth_op_id: EthOpId, @@ -117,7 +199,7 @@ impl Database { .await?) 
} - pub async fn is_previous_operation_confirmed( + async fn is_previous_operation_confirmed( &self, connection: &mut StorageProcessor<'_>, op: ÐOperation, @@ -152,7 +234,7 @@ impl Database { Ok(confirmed) } - pub async fn confirm_operation( + async fn confirm_operation( &self, connection: &mut StorageProcessor<'_>, hash: &H256, @@ -175,15 +257,12 @@ impl Database { Ok(()) } - pub async fn load_stats( - &self, - connection: &mut StorageProcessor<'_>, - ) -> anyhow::Result { + async fn load_stats(&self, connection: &mut StorageProcessor<'_>) -> anyhow::Result { let stats = connection.ethereum_schema().load_stats().await?; Ok(stats.into()) } - pub async fn load_gas_price_limit( + async fn load_gas_price_limit( &self, connection: &mut StorageProcessor<'_>, ) -> anyhow::Result { @@ -191,7 +270,7 @@ impl Database { Ok(limit) } - pub async fn update_gas_price_params( + async fn update_gas_price_params( &self, connection: &mut StorageProcessor<'_>, gas_price_limit: U256, diff --git a/core/bin/zksync_eth_sender/src/ethereum_interface.rs b/core/bin/zksync_eth_sender/src/ethereum_interface.rs index fa438d9b5d..68b857e8c0 100644 --- a/core/bin/zksync_eth_sender/src/ethereum_interface.rs +++ b/core/bin/zksync_eth_sender/src/ethereum_interface.rs @@ -43,16 +43,16 @@ pub(super) trait EthereumInterface { /// - If transaction was not executed, returned value is `None`. /// - If transaction was executed, the information about its success and amount /// of confirmations is returned. - async fn get_tx_status(&self, hash: &H256) -> Result, anyhow::Error>; + async fn get_tx_status(&self, hash: &H256) -> anyhow::Result>; /// Gets the actual block number. - async fn block_number(&self) -> Result; + async fn block_number(&self) -> anyhow::Result; /// Gets the current gas price. - async fn gas_price(&self) -> Result; + async fn gas_price(&self) -> anyhow::Result; /// Sends a signed transaction to the Ethereum blockchain. 
- async fn send_tx(&self, signed_tx: &SignedCallResult) -> Result<(), anyhow::Error>; + async fn send_tx(&self, signed_tx: &SignedCallResult) -> anyhow::Result<()>; /// Encodes the transaction data (smart contract method and its input) to the bytes /// without creating an actual transaction. @@ -64,7 +64,7 @@ pub(super) trait EthereumInterface { &self, data: Vec, options: Options, - ) -> Result; + ) -> anyhow::Result; /// Returns the information about transaction failure reason. async fn failure_reason(&self, tx_hash: H256) -> Option; @@ -78,7 +78,7 @@ pub struct EthereumHttpClient { } impl EthereumHttpClient { - pub fn new(options: &ConfigurationOptions) -> Result { + pub fn new(options: &ConfigurationOptions) -> anyhow::Result { let transport = Http::new(&options.web3_url)?; let ethereum_signer = PrivateKeySigner::new( options @@ -107,7 +107,7 @@ impl EthereumHttpClient { #[async_trait::async_trait] impl EthereumInterface for EthereumHttpClient { - async fn get_tx_status(&self, hash: &H256) -> Result, anyhow::Error> { + async fn get_tx_status(&self, hash: &H256) -> anyhow::Result> { self.sleep(); let receipt = self .eth_client @@ -145,13 +145,13 @@ impl EthereumInterface for EthereumHttpClient { } } - async fn block_number(&self) -> Result { + async fn block_number(&self) -> anyhow::Result { self.sleep(); let block_number = self.eth_client.web3.eth().block_number().await?; Ok(block_number.as_u64()) } - async fn send_tx(&self, signed_tx: &SignedCallResult) -> Result<(), anyhow::Error> { + async fn send_tx(&self, signed_tx: &SignedCallResult) -> anyhow::Result<()> { self.sleep(); let hash = self .eth_client @@ -164,7 +164,7 @@ impl EthereumInterface for EthereumHttpClient { Ok(()) } - async fn gas_price(&self) -> Result { + async fn gas_price(&self) -> anyhow::Result { self.sleep(); self.eth_client.get_gas_price().await } @@ -177,7 +177,7 @@ impl EthereumInterface for EthereumHttpClient { &self, data: Vec, options: Options, - ) -> Result { + ) -> anyhow::Result { 
self.sleep(); self.eth_client.sign_prepared_tx(data, options).await } diff --git a/core/bin/zksync_eth_sender/src/gas_adjuster/mod.rs b/core/bin/zksync_eth_sender/src/gas_adjuster/mod.rs index 2c01d993e6..71ffc848fc 100644 --- a/core/bin/zksync_eth_sender/src/gas_adjuster/mod.rs +++ b/core/bin/zksync_eth_sender/src/gas_adjuster/mod.rs @@ -3,13 +3,12 @@ use std::{collections::VecDeque, marker::PhantomData, time::Instant}; // External deps use zksync_basic_types::U256; // Local deps -use crate::{database::Database, ethereum_interface::EthereumInterface}; +use crate::{database::DatabaseInterface, ethereum_interface::EthereumInterface}; mod parameters; -// TODO: Restore tests (#1109). -// #[cfg(test)] -// mod tests; +#[cfg(test)] +mod tests; /// Gas adjuster is an entity capable of scaling the gas price for /// all the Ethereum transactions. @@ -21,7 +20,7 @@ mod parameters; /// gas price for transactions that were not mined by the network /// within a reasonable time. #[derive(Debug)] -pub(super) struct GasAdjuster { +pub(super) struct GasAdjuster { /// Collected statistics about recently used gas prices. statistics: GasStatistics, /// Timestamp of the last maximum gas price update. 
@@ -30,10 +29,11 @@ pub(super) struct GasAdjuster { last_sample_added: Instant, _etherum_client: PhantomData, + _db: PhantomData, } -impl GasAdjuster { - pub async fn new(db: &Database) -> Self { +impl GasAdjuster { + pub async fn new(db: &DB) -> Self { let mut connection = db .acquire_connection() .await @@ -48,6 +48,7 @@ impl GasAdjuster { last_sample_added: Instant::now(), _etherum_client: PhantomData, + _db: PhantomData, } } @@ -57,7 +58,7 @@ impl GasAdjuster { &mut self, ethereum: Ð, old_tx_gas_price: Option, - ) -> Result { + ) -> anyhow::Result { let network_price = ethereum.gas_price().await?; let scaled_price = if let Some(old_price) = old_tx_gas_price { @@ -88,7 +89,7 @@ impl GasAdjuster { /// Performs an actualization routine for `GasAdjuster`: /// This method is intended to be invoked periodically, and it updates the /// current max gas price limit according to the configurable update interval. - pub async fn keep_updated(&mut self, ethereum: Ð, db: &Database) { + pub async fn keep_updated(&mut self, ethereum: Ð, db: &DB) { if self.last_sample_added.elapsed() >= parameters::sample_adding_interval() { // Report the current price to be gathered by the statistics module. match ethereum.gas_price().await { @@ -161,7 +162,7 @@ impl GasAdjuster { /// Helper structure responsible for collecting the data about recent transactions, /// calculating the average gas price, and providing the gas price limit. 
#[derive(Debug)] -struct GasStatistics { +pub(super) struct GasStatistics { samples: VecDeque, current_sum: U256, current_max_price: U256, diff --git a/core/bin/zksync_eth_sender/src/gas_adjuster/tests.rs b/core/bin/zksync_eth_sender/src/gas_adjuster/tests.rs index 4516c34134..f9890652cf 100644 --- a/core/bin/zksync_eth_sender/src/gas_adjuster/tests.rs +++ b/core/bin/zksync_eth_sender/src/gas_adjuster/tests.rs @@ -1,15 +1,15 @@ -// External uses +// Workspace uses use zksync_basic_types::U256; // Local uses -use crate::eth_sender::{ - database::DatabaseAccess, - gas_adjuster::{parameters::limit_scale_factor, GasAdjuster, GasStatistics}, +use crate::{ + gas_adjuster::{parameters::limit_scale_factor, GasStatistics}, tests::mock::{default_eth_sender, MockDatabase, MockEthereum}, + DatabaseInterface, GasAdjuster, }; /// Creates `Ethereum` and `Database` instances for the `GasAdjuster` tests. -fn eth_and_db_clients() -> (MockEthereum, MockDatabase) { - let (eth_sender, _, _) = default_eth_sender(); +async fn eth_and_db_clients() -> (MockEthereum, MockDatabase) { + let eth_sender = default_eth_sender().await; (eth_sender.ethereum, eth_sender.db) } @@ -23,18 +23,26 @@ fn scale_gas_limit(value: u64) -> u64 { /// Checks that by default (with no previous tx info provided), GasAdjuster /// provides the gas price suggested by the client. -#[test] -fn initial_price() { - let (mut ethereum, db) = eth_and_db_clients(); - let mut gas_adjuster: GasAdjuster = GasAdjuster::new(&db); +#[tokio::test] +async fn initial_price() { + let (mut ethereum, db) = eth_and_db_clients().await; + let mut connection = db.acquire_connection().await.unwrap(); + let mut gas_adjuster: GasAdjuster = GasAdjuster::new(&db).await; // Vector of ethereum client prices. 
- let test_vector = vec![0, 13, db.load_gas_price_limit().unwrap().low_u64()]; + let test_vector = vec![ + 0, + 13, + db.load_gas_price_limit(&mut connection) + .await + .unwrap() + .low_u64(), + ]; for eth_client_price in test_vector { ethereum.gas_price = eth_client_price.into(); - let scaled_gas = gas_adjuster.get_gas_price(ðereum, None).unwrap(); + let scaled_gas = gas_adjuster.get_gas_price(ðereum, None).await.unwrap(); assert_eq!(scaled_gas, eth_client_price.into()); } } @@ -42,11 +50,11 @@ fn initial_price() { /// Test for the lower gas limit: it should be a network-suggested price for new transactions, /// and for stuck transactions it should be the maximum of either price increased by 15% or /// the network-suggested price. -#[test] -fn lower_gas_limit() { - let (mut ethereum, db) = eth_and_db_clients(); +#[tokio::test] +async fn lower_gas_limit() { + let (mut ethereum, db) = eth_and_db_clients().await; - let mut gas_adjuster: GasAdjuster = GasAdjuster::new(&db); + let mut gas_adjuster: GasAdjuster = GasAdjuster::new(&db).await; // Test vector of pairs (ethereum client price, price of the last tx, expected price). let test_vector = vec![ @@ -64,20 +72,21 @@ fn lower_gas_limit() { // Check that gas price of 1000 is increased to 1150. let scaled_gas = gas_adjuster .get_gas_price(ðereum, Some(previous_price.into())) + .await .unwrap(); assert_eq!(scaled_gas, expected_price.into()); } } // Checks that after re-creation the price limit is restored from the database. -#[test] -fn gas_price_limit_restore() { +#[tokio::test] +async fn gas_price_limit_restore() { // Price limit to set (should be obtained from the DB by GasAdjuster). 
const PRICE_LIMIT: u64 = 1000; - let (_, db) = eth_and_db_clients(); - db.update_gas_price_limit(PRICE_LIMIT.into()).unwrap(); - let gas_adjuster: GasAdjuster = GasAdjuster::new(&db); + let (_, db) = eth_and_db_clients().await; + db.update_gas_price_limit(PRICE_LIMIT.into()).await.unwrap(); + let gas_adjuster: GasAdjuster = GasAdjuster::new(&db).await; assert_eq!(gas_adjuster.get_current_max_price(), PRICE_LIMIT.into()); } @@ -85,22 +94,21 @@ fn gas_price_limit_restore() { /// Checks that price is clamped according to the current limit. /// This check works with the initial value only, and does not update it /// with the gathered stats. -#[test] -fn initial_upper_gas_limit() { +#[tokio::test] +async fn initial_upper_gas_limit() { // Initial price limit to set. const PRICE_LIMIT: u64 = 1000; - let (mut ethereum, db) = eth_and_db_clients(); - - db.update_gas_price_limit(PRICE_LIMIT.into()).unwrap(); + let (mut ethereum, db) = eth_and_db_clients().await; - let mut gas_adjuster: GasAdjuster = GasAdjuster::new(&db); + db.update_gas_price_limit(PRICE_LIMIT.into()).await.unwrap(); + let mut gas_adjuster: GasAdjuster = GasAdjuster::new(&db).await; // Set the gas price in Ethereum, which is greater than the current limit. ethereum.gas_price = U256::from(PRICE_LIMIT) + 1; // Check that gas price of `PRICE_LIMIT` + 1 is clamped to `PRICE_LIMIT`. 
- let scaled_gas = gas_adjuster.get_gas_price(ðereum, None).unwrap(); + let scaled_gas = gas_adjuster.get_gas_price(ðereum, None).await.unwrap(); assert_eq!(scaled_gas, PRICE_LIMIT.into()); // Check that gas price is clamped even if both the ethereum client price @@ -110,6 +118,7 @@ fn initial_upper_gas_limit() { let scaled_gas = gas_adjuster .get_gas_price(ðereum, Some(previous_price)) + .await .unwrap(); assert_eq!(scaled_gas, PRICE_LIMIT.into()); } @@ -118,8 +127,8 @@ fn initial_upper_gas_limit() { /// We are successively keep requesting the gas price with the /// ethereum client suggesting the price far beyond the current limit /// and expect the price limit to be updated according to the schedule. -#[test] -fn gas_price_limit_scaling() { +#[tokio::test] +async fn gas_price_limit_scaling() { // Amount of times we'll call `GasAdjuster::keep_updated`. const PRICE_UPDATES: u64 = 5; // Amount of samples to gather statistics. @@ -127,9 +136,12 @@ fn gas_price_limit_scaling() { // Initial price limit to set. const PRICE_LIMIT: u64 = 1000; - let (mut ethereum, db) = eth_and_db_clients(); - db.update_gas_price_limit(PRICE_LIMIT.into()).unwrap(); - let mut gas_adjuster: GasAdjuster = GasAdjuster::new(&db); + let (mut ethereum, db) = eth_and_db_clients().await; + let mut connection = db.acquire_connection().await.unwrap(); + + db.update_gas_price_limit(PRICE_LIMIT.into()).await.unwrap(); + + let mut gas_adjuster: GasAdjuster = GasAdjuster::new(&db).await; // Set the client price way beyond the limit. ethereum.gas_price = U256::from(PRICE_LIMIT * 2); @@ -140,6 +152,7 @@ fn gas_price_limit_scaling() { for _ in 0..N_SAMPLES { let suggested_price = gas_adjuster .get_gas_price(ðereum, Some(expected_price.into())) + .await .unwrap(); // Until we call `keep_updated`, the suggested price should not change and should be @@ -147,7 +160,7 @@ fn gas_price_limit_scaling() { assert_eq!(suggested_price, expected_price.into()); // Update the limit. 
- gas_adjuster.keep_updated(ðereum, &db); + gas_adjuster.keep_updated(ðereum, &db).await; } // Stats are gathered. Now they're based on the Ethereum price. @@ -157,11 +170,11 @@ fn gas_price_limit_scaling() { // Each time the limit will be changed, so it's not checked. Instead, we check // the expected limit after `N_SAMPLES` below (it's simpler). for _ in 0..N_SAMPLES { - gas_adjuster.keep_updated(ðereum, &db); + gas_adjuster.keep_updated(ðereum, &db).await; } // Check that new limit is scaled old limit (and also check that it's stored in the DB). - let new_limit = db.load_gas_price_limit().unwrap(); + let new_limit = db.load_gas_price_limit(&mut connection).await.unwrap(); assert_eq!(new_limit, scale_gas_limit(expected_price).into()); // Update the expected price for the next round. @@ -174,9 +187,9 @@ fn gas_price_limit_scaling() { /// Checks that if the price suggested by the Ethereum client is below the price limit, /// the limit is calculated as (average of samples) * scale_factor. -#[test] -#[ignore] // TODO: Disabled as currently the limit is calculated based on the network price rather than used txs samples (#1109). -fn gas_price_limit_average_basis() { +#[tokio::test] +#[ignore] // TODO: Disabled as currently the limit is calculated based on the network price rather than used txs samples (#1130). +async fn gas_price_limit_average_basis() { // Increases the gas price value by 15%. 
fn increase_gas_price(value: u64) -> u64 { value * 115 / 100 @@ -191,9 +204,10 @@ fn gas_price_limit_average_basis() { // Price suggested by Ethereum client; const SUGGESTED_PRICE: u64 = 10; - let (mut ethereum, db) = eth_and_db_clients(); - db.update_gas_price_limit(PRICE_LIMIT.into()).unwrap(); - let mut gas_adjuster: GasAdjuster = GasAdjuster::new(&db); + let (mut ethereum, db) = eth_and_db_clients().await; + let mut connection = db.acquire_connection().await.unwrap(); + db.update_gas_price_limit(PRICE_LIMIT.into()).await.unwrap(); + let mut gas_adjuster: GasAdjuster = GasAdjuster::new(&db).await; // Set the client price way beyond the limit. ethereum.gas_price = SUGGESTED_PRICE.into(); @@ -208,6 +222,7 @@ fn gas_price_limit_average_basis() { for _ in 0..N_SAMPLES { let suggested_price = gas_adjuster .get_gas_price(ðereum, Some(expected_price.into())) + .await .unwrap(); let increased_price = increase_gas_price(expected_price); @@ -226,10 +241,10 @@ fn gas_price_limit_average_basis() { } // Keep the limit updated (it should become (avg of prices) * (scale factor). - gas_adjuster.keep_updated(ðereum, &db); + gas_adjuster.keep_updated(ðereum, &db).await; // Check that new limit is based on the average of previous N samples. - let new_limit = db.load_gas_price_limit().unwrap(); + let new_limit = db.load_gas_price_limit(&mut connection).await.unwrap(); current_limit = scale_gas_limit(samples_sum / N_SAMPLES as u64); assert_eq!(new_limit, current_limit.into()); @@ -237,8 +252,8 @@ fn gas_price_limit_average_basis() { } /// Checks that if the gas price limit is never achieved, it never increased as well. -#[test] -fn gas_price_limit_preservation() { +#[tokio::test] +async fn gas_price_limit_preservation() { // Amount of times we'll call `GasAdjuster::keep_updated`. // The value is lower than in tests above, since the limit must not change. 
const PRICE_UPDATES: u64 = 2; @@ -249,9 +264,10 @@ fn gas_price_limit_preservation() { // Price limit to set: it's based on the suggested price, so it won't ever change. let price_limit = scale_gas_limit(SUGGESTED_PRICE); - let (mut ethereum, db) = eth_and_db_clients(); - db.update_gas_price_limit(price_limit.into()).unwrap(); - let mut gas_adjuster: GasAdjuster = GasAdjuster::new(&db); + let (mut ethereum, db) = eth_and_db_clients().await; + let mut connection = db.acquire_connection().await.unwrap(); + db.update_gas_price_limit(price_limit.into()).await.unwrap(); + let mut gas_adjuster: GasAdjuster = GasAdjuster::new(&db).await; // Set the client price way beyond the limit. ethereum.gas_price = SUGGESTED_PRICE.into(); @@ -262,13 +278,13 @@ fn gas_price_limit_preservation() { // Every time we get the new price (without old price provided), so no scaling // involved, every time an Ethereum client price is provided (since it's lower // than the limit). - let suggested_price = gas_adjuster.get_gas_price(ðereum, None).unwrap(); + let suggested_price = gas_adjuster.get_gas_price(ðereum, None).await.unwrap(); assert_eq!(suggested_price, SUGGESTED_PRICE.into()); } // Keep the limit updated (it should not change). 
- gas_adjuster.keep_updated(ðereum, &db); - let new_limit = db.load_gas_price_limit().unwrap(); + gas_adjuster.keep_updated(ðereum, &db).await; + let new_limit = db.load_gas_price_limit(&mut connection).await.unwrap(); assert_eq!(new_limit, price_limit.into()); } } diff --git a/core/bin/zksync_eth_sender/src/lib.rs b/core/bin/zksync_eth_sender/src/lib.rs index 30bb9e86e3..522a8c0f0f 100644 --- a/core/bin/zksync_eth_sender/src/lib.rs +++ b/core/bin/zksync_eth_sender/src/lib.rs @@ -24,7 +24,7 @@ use zksync_types::{ }; // Local uses use self::{ - database::Database, + database::{Database, DatabaseInterface}, ethereum_interface::{EthereumHttpClient, EthereumInterface}, gas_adjuster::GasAdjuster, transactions::*, @@ -37,9 +37,8 @@ mod gas_adjuster; mod transactions; mod tx_queue; -// TODO: Restore tests (#1109). -// #[cfg(test)] -// mod tests; +#[cfg(test)] +mod tests; /// Wait this amount of time if we hit rate limit on infura https://infura.io/docs/ethereum/json-rpc/ratelimits const RATE_LIMIT_BACKOFF_PERIOD: Duration = Duration::from_secs(30); @@ -111,23 +110,23 @@ enum TxCheckMode { /// report the incident to the log and then panic to prevent continue working in a probably /// erroneous conditions. Failure handling policy is determined by a corresponding callback, /// which can be changed if needed. -struct ETHSender { +struct ETHSender { /// Ongoing operations queue. ongoing_ops: VecDeque, /// Connection to the database. - db: Database, + db: DB, /// Ethereum intermediator. ethereum: ETH, /// Queue for ordered transaction processing. tx_queue: TxQueue, /// Utility for managing the gas price for transactions. - gas_adjuster: GasAdjuster, + gas_adjuster: GasAdjuster, /// Settings for the `ETHSender`. 
options: EthSenderOptions, } -impl ETHSender { - pub async fn new(options: EthSenderOptions, db: Database, ethereum: ETH) -> Self { +impl ETHSender { + pub async fn new(options: EthSenderOptions, db: DB, ethereum: ETH) -> Self { let mut connection = db .acquire_connection() .await @@ -305,7 +304,7 @@ impl ETHSender { } /// Stores the new operation in the database and sends the corresponding transaction. - async fn initialize_operation(&mut self, tx: TxData) -> Result<(), anyhow::Error> { + async fn initialize_operation(&mut self, tx: TxData) -> anyhow::Result<()> { let current_block = self.ethereum.block_number().await?; let deadline_block = self.get_deadline_block(current_block); let gas_price = self @@ -414,7 +413,7 @@ impl ETHSender { async fn perform_commitment_step( &mut self, op: &mut ETHOperation, - ) -> Result { + ) -> anyhow::Result { assert!( !op.used_tx_hashes.is_empty(), "OperationETHState should have at least one transaction" @@ -558,7 +557,7 @@ impl ETHSender { op: ÐOperation, tx_hash: &H256, current_block: u64, - ) -> Result { + ) -> anyhow::Result { let status = self.ethereum.get_tx_status(tx_hash).await?; let outcome = match status { @@ -596,10 +595,7 @@ impl ETHSender { } /// Creates a new Ethereum operation. 
- async fn sign_new_tx( - ethereum: Ð, - op: ÐOperation, - ) -> Result { + async fn sign_new_tx(ethereum: Ð, op: ÐOperation) -> anyhow::Result { let tx_options = { let mut options = Options::default(); options.nonce = Some(op.nonce); @@ -660,7 +656,7 @@ impl ETHSender { &mut self, deadline_block: u64, stuck_tx: &mut ETHOperation, - ) -> Result { + ) -> anyhow::Result { let tx_options = self.tx_options_from_stuck_tx(stuck_tx).await?; let raw_tx = stuck_tx.encoded_tx_data.clone(); @@ -678,7 +674,7 @@ impl ETHSender { async fn tx_options_from_stuck_tx( &mut self, stuck_tx: ÐOperation, - ) -> Result { + ) -> anyhow::Result { let old_tx_gas_price = stuck_tx.last_used_gas_price; let new_gas_price = self diff --git a/core/bin/zksync_eth_sender/src/main.rs b/core/bin/zksync_eth_sender/src/main.rs index 630ef098f1..9d357d5dae 100644 --- a/core/bin/zksync_eth_sender/src/main.rs +++ b/core/bin/zksync_eth_sender/src/main.rs @@ -22,7 +22,7 @@ async fn main() -> anyhow::Result<()> { .expect("Error setting Ctrl-C handler"); } - let pool = ConnectionPool::new(Some(ETH_SENDER_CONNECTION_POOL_SIZE)).await; + let pool = ConnectionPool::new(Some(ETH_SENDER_CONNECTION_POOL_SIZE)); let config_options = ConfigurationOptions::from_env(); let task_handle = run_eth_sender(pool, config_options); diff --git a/core/bin/zksync_eth_sender/src/tests/mock.rs b/core/bin/zksync_eth_sender/src/tests/mock.rs index b818e8ff0d..b81c9837af 100644 --- a/core/bin/zksync_eth_sender/src/tests/mock.rs +++ b/core/bin/zksync_eth_sender/src/tests/mock.rs @@ -1,39 +1,38 @@ //! Mocking utilities for tests. 
// Built-in deps -use std::cell::{Cell, RefCell}; -use std::collections::{HashMap, VecDeque}; +use crate::database::DatabaseInterface; +use crate::ethereum_interface::FailureInfo; +use crate::EthSenderOptions; +use std::collections::{BTreeMap, HashMap, VecDeque}; +use tokio::sync::RwLock; // External uses -use futures::channel::mpsc; use web3::contract::{tokens::Tokenize, Options}; use zksync_basic_types::{H256, U256}; // Workspace uses use zksync_eth_client::SignedCallResult; +use zksync_storage::StorageProcessor; use zksync_types::{ - config_options::EthSenderOptions, ethereum::{ETHOperation, EthOpId, InsertedOperationResponse, OperationType}, Action, Operation, }; // Local uses use super::ETHSender; -// use crate::eth_sender::database::DatabaseAccess; -use crate::eth_sender::ethereum_interface::EthereumInterface; -use crate::eth_sender::transactions::{ETHStats, ExecutedTxStatus}; -use crate::eth_sender::ETHSenderRequest; -use crate::utils::current_zksync_info::CurrentZkSyncInfo; -const CHANNEL_CAPACITY: usize = 16; +use crate::ethereum_interface::EthereumInterface; +use crate::transactions::{ETHStats, ExecutedTxStatus}; /// Mock database is capable of recording all the incoming requests for the further analysis. 
#[derive(Debug, Default)] -pub(in crate::eth_sender) struct MockDatabase { +pub(in crate) struct MockDatabase { restore_state: VecDeque, - unconfirmed_operations: RefCell>, - confirmed_operations: RefCell>, - nonce: Cell, - gas_price_limit: Cell, - pending_op_id: Cell, - stats: RefCell, + unconfirmed_operations: RwLock>, + unprocessed_operations: RwLock>, + confirmed_operations: RwLock>, + nonce: RwLock, + gas_price_limit: RwLock, + pending_op_id: RwLock, + stats: RwLock, } impl MockDatabase { @@ -48,64 +47,131 @@ impl MockDatabase { .fold(0, |acc, op| acc + op.used_tx_hashes.len()); let pending_op_id = restore_state.len(); - let unconfirmed_operations: HashMap = + let unconfirmed_operations: BTreeMap = restore_state.iter().map(|op| (op.id, op.clone())).collect(); - let gas_price_limit: u64 = - zksync_types::config_options::parse_env("ETH_GAS_PRICE_DEFAULT_LIMIT"); + let gas_price_limit: u64 = zksync_utils::parse_env("ETH_GAS_PRICE_DEFAULT_LIMIT"); Self { restore_state, - nonce: Cell::new(nonce as i64), - gas_price_limit: Cell::new(gas_price_limit.into()), - pending_op_id: Cell::new(pending_op_id as EthOpId), - stats: RefCell::new(stats), - unconfirmed_operations: RefCell::new(unconfirmed_operations), + nonce: RwLock::new(nonce as i64), + gas_price_limit: RwLock::new(gas_price_limit.into()), + pending_op_id: RwLock::new(pending_op_id as EthOpId), + stats: RwLock::new(stats), + unconfirmed_operations: RwLock::new(unconfirmed_operations), ..Default::default() } } + pub async fn update_gas_price_limit(&self, value: U256) -> anyhow::Result<()> { + let mut gas_price_limit = self.gas_price_limit.write().await; + (*gas_price_limit) = value; + + Ok(()) + } + + /// Simulates the operation of OperationsSchema, creates a new operation in the database. 
+ pub async fn send_operation(&mut self, op: Operation) -> anyhow::Result<()> { + let nonce = op.id.expect("Nonce must be set for every tx"); + + self.unprocessed_operations.write().await.insert(nonce, op); + + Ok(()) + } + /// Ensures that the provided transaction is stored in the database and not confirmed yet. - pub fn assert_stored(&self, tx: ÐOperation) { - assert_eq!(self.unconfirmed_operations.borrow().get(&tx.id), Some(tx)); + pub async fn assert_stored(&self, tx: ÐOperation) { + assert_eq!( + self.unconfirmed_operations.read().await.get(&tx.id), + Some(tx) + ); - assert!(self.confirmed_operations.borrow().get(&tx.id).is_none()); + assert!(self.confirmed_operations.read().await.get(&tx.id).is_none()); } /// Ensures that the provided transaction is stored as confirmed. - pub fn assert_confirmed(&self, tx: ÐOperation) { - assert_eq!(self.confirmed_operations.borrow().get(&tx.id), Some(tx)); - - assert!(self.unconfirmed_operations.borrow().get(&tx.id).is_none()); + pub async fn assert_confirmed(&self, tx: ÐOperation) { + assert_eq!(self.confirmed_operations.read().await.get(&tx.id), Some(tx)); + + assert!(self + .unconfirmed_operations + .read() + .await + .get(&tx.id) + .is_none()); } - fn next_nonce(&self) -> Result { - let old_value = self.nonce.get(); - let new_value = old_value + 1; - self.nonce.set(new_value); + async fn next_nonce(&self) -> anyhow::Result { + let old_value = *(self.nonce.read().await); + let mut new_value = self.nonce.write().await; + *new_value = old_value + 1; Ok(old_value) } } -impl DatabaseAccess for MockDatabase { - fn restore_state(&self) -> Result<(VecDeque, Vec), anyhow::Error> { - Ok((self.restore_state.clone(), Vec::new())) +#[async_trait::async_trait] +impl DatabaseInterface for MockDatabase { + /// Creates a new database connection, used as a stub + /// and nothing will be sent through this connection. 
+ async fn acquire_connection(&self) -> anyhow::Result> { + StorageProcessor::establish_connection().await + } + + /// Returns all unprocessed operations and then deletes them. + async fn load_new_operations( + &self, + _connection: &mut StorageProcessor<'_>, + ) -> anyhow::Result> { + let unprocessed_operations = self + .unprocessed_operations + .read() + .await + .values() + .cloned() + .collect::>(); + + self.unprocessed_operations.write().await.clear(); + + Ok(unprocessed_operations) + } + + async fn update_gas_price_params( + &self, + _connection: &mut StorageProcessor<'_>, + gas_price_limit: U256, + _average_gas_price: U256, + ) -> anyhow::Result<()> { + let mut new_gas_price_limit = self.gas_price_limit.write().await; + *new_gas_price_limit = gas_price_limit; + + Ok(()) } - fn save_new_eth_tx( + async fn restore_state( &self, + connection: &mut StorageProcessor<'_>, + ) -> anyhow::Result<(VecDeque, Vec)> { + Ok(( + self.restore_state.clone(), + self.load_new_operations(connection).await?, + )) + } + + async fn save_new_eth_tx( + &self, + _connection: &mut StorageProcessor<'_>, op_type: OperationType, op: Option, deadline_block: i64, used_gas_price: U256, encoded_tx_data: Vec, - ) -> Result { - let id = self.pending_op_id.get(); - let new_id = id + 1; - self.pending_op_id.set(new_id); + ) -> anyhow::Result { + let id = *(self.pending_op_id.read().await); + let mut pending_op_id = self.pending_op_id.write().await; + *pending_op_id = id + 1; - let nonce = self.next_nonce()?; + let nonce = self.next_nonce().await?; // Store with the assigned ID. let state = ETHOperation { @@ -121,7 +187,7 @@ impl DatabaseAccess for MockDatabase { final_hash: None, }; - self.unconfirmed_operations.borrow_mut().insert(id, state); + self.unconfirmed_operations.write().await.insert(id, state); let response = InsertedOperationResponse { id, @@ -132,15 +198,21 @@ impl DatabaseAccess for MockDatabase { } /// Adds a tx hash entry associated with some Ethereum operation to the database. 
- fn add_hash_entry(&self, eth_op_id: i64, hash: &H256) -> Result<(), anyhow::Error> { + async fn add_hash_entry( + &self, + _connection: &mut StorageProcessor<'_>, + eth_op_id: i64, + hash: &H256, + ) -> anyhow::Result<()> { assert!( self.unconfirmed_operations - .borrow() + .read() + .await .contains_key(ð_op_id), "Attempt to update tx that is not unconfirmed" ); - let mut ops = self.unconfirmed_operations.borrow_mut(); + let mut ops = self.unconfirmed_operations.write().await; let mut op = ops[ð_op_id].clone(); op.used_tx_hashes.push(*hash); ops.insert(eth_op_id, op); @@ -148,20 +220,22 @@ impl DatabaseAccess for MockDatabase { Ok(()) } - fn update_eth_tx( + async fn update_eth_tx( &self, + _connection: &mut StorageProcessor<'_>, eth_op_id: EthOpId, new_deadline_block: i64, new_gas_value: U256, - ) -> Result<(), anyhow::Error> { + ) -> anyhow::Result<()> { assert!( self.unconfirmed_operations - .borrow() + .read() + .await .contains_key(ð_op_id), "Attempt to update tx that is not unconfirmed" ); - let mut ops = self.unconfirmed_operations.borrow_mut(); + let mut ops = self.unconfirmed_operations.write().await; let mut op = ops[ð_op_id].clone(); op.last_deadline_block = new_deadline_block as u64; op.last_used_gas_price = new_gas_value; @@ -170,8 +244,13 @@ impl DatabaseAccess for MockDatabase { Ok(()) } - fn confirm_operation(&self, hash: &H256) -> Result<(), anyhow::Error> { - let mut unconfirmed_operations = self.unconfirmed_operations.borrow_mut(); + async fn confirm_operation( + &self, + _connection: &mut StorageProcessor<'_>, + hash: &H256, + _op: ÐOperation, + ) -> anyhow::Result<()> { + let mut unconfirmed_operations = self.unconfirmed_operations.write().await; let mut op_idx: Option = None; for operation in unconfirmed_operations.values_mut() { if operation.used_tx_hashes.contains(hash) { @@ -190,41 +269,66 @@ impl DatabaseAccess for MockDatabase { let operation = unconfirmed_operations.remove(&op_idx).unwrap(); self.confirmed_operations - .borrow_mut() + 
.write() + .await .insert(op_idx, operation); Ok(()) } - fn load_gas_price_limit(&self) -> Result { - Ok(self.gas_price_limit.get()) + async fn load_gas_price_limit( + &self, + _connection: &mut StorageProcessor<'_>, + ) -> anyhow::Result { + Ok(*self.gas_price_limit.read().await) } - fn update_gas_price_limit(&self, value: U256) -> Result<(), anyhow::Error> { - self.gas_price_limit.set(value); - - Ok(()) + async fn load_stats(&self, _connection: &mut StorageProcessor<'_>) -> anyhow::Result { + Ok(self.stats.read().await.clone()) } - fn load_stats(&self) -> Result { - Ok(self.stats.borrow().clone()) - } + async fn is_previous_operation_confirmed( + &self, + _connection: &mut StorageProcessor<'_>, + op: ÐOperation, + ) -> anyhow::Result { + let confirmed = match op.op_type { + OperationType::Commit | OperationType::Verify => { + let op = op.op.as_ref().unwrap(); + // We're checking previous block, so for the edge case of first block we can say that it was confirmed. + let block_to_check = if op.block.block_number > 1 { + op.block.block_number - 1 + } else { + return Ok(true); + }; + + let confirmed_operations = self.confirmed_operations.read().await.clone(); + let maybe_operation = confirmed_operations.get(&(block_to_check as i64)); + + let operation = match maybe_operation { + Some(op) => op, + None => return Ok(false), + }; + + operation.confirmed + } + OperationType::Withdraw => { + // Withdrawals aren't actually sequential, so we don't really care. + true + } + }; - fn transaction(&self, f: F) -> Result - where - F: FnOnce() -> Result, - { - f() + Ok(confirmed) } } /// Mock Ethereum client is capable of recording all the incoming requests for the further analysis. 
#[derive(Debug)] -pub(in crate::eth_sender) struct MockEthereum { +pub(in crate) struct MockEthereum { pub block_number: u64, pub gas_price: U256, - pub tx_statuses: RefCell>, - pub sent_txs: RefCell>, + pub tx_statuses: RwLock>, + pub sent_txs: RwLock>, } impl Default for MockEthereum { @@ -254,21 +358,21 @@ impl MockEthereum { } /// Checks that there was a request to send the provided transaction. - pub fn assert_sent(&self, hash: &H256) { + pub async fn assert_sent(&self, hash: &H256) { assert!( - self.sent_txs.borrow().get(hash).is_some(), + self.sent_txs.read().await.get(hash).is_some(), format!("Transaction with hash {:?} was not sent", hash), ); } /// Adds an response for the sent transaction for `ETHSender` to receive. - pub fn add_execution(&mut self, hash: &H256, status: &ExecutedTxStatus) { - self.tx_statuses.borrow_mut().insert(*hash, status.clone()); + pub async fn add_execution(&mut self, hash: &H256, status: &ExecutedTxStatus) { + self.tx_statuses.write().await.insert(*hash, status.clone()); } /// Increments the blocks by a provided `confirmations` and marks the sent transaction /// as a success. - pub fn add_successfull_execution(&mut self, tx_hash: H256, confirmations: u64) { + pub async fn add_successfull_execution(&mut self, tx_hash: H256, confirmations: u64) { self.block_number += confirmations; let status = ExecutedTxStatus { @@ -276,11 +380,11 @@ impl MockEthereum { success: true, receipt: None, }; - self.tx_statuses.borrow_mut().insert(tx_hash, status); + self.tx_statuses.write().await.insert(tx_hash, status); } /// Same as `add_successfull_execution`, but marks the transaction as a failure. 
- pub fn add_failed_execution(&mut self, hash: &H256, confirmations: u64) { + pub async fn add_failed_execution(&mut self, hash: &H256, confirmations: u64) { self.block_number += confirmations; let status = ExecutedTxStatus { @@ -288,26 +392,28 @@ impl MockEthereum { success: false, receipt: Some(Default::default()), }; - self.tx_statuses.borrow_mut().insert(*hash, status); + self.tx_statuses.write().await.insert(*hash, status); } } +#[async_trait::async_trait] impl EthereumInterface for MockEthereum { - fn get_tx_status(&self, hash: &H256) -> Result, anyhow::Error> { - Ok(self.tx_statuses.borrow().get(hash).cloned()) + async fn get_tx_status(&self, hash: &H256) -> anyhow::Result> { + Ok(self.tx_statuses.read().await.get(hash).cloned()) } - fn block_number(&self) -> Result { + async fn block_number(&self) -> anyhow::Result { Ok(self.block_number) } - fn gas_price(&self) -> Result { + async fn gas_price(&self) -> anyhow::Result { Ok(self.gas_price) } - fn send_tx(&self, signed_tx: &SignedCallResult) -> Result<(), anyhow::Error> { + async fn send_tx(&self, signed_tx: &SignedCallResult) -> anyhow::Result<()> { self.sent_txs - .borrow_mut() + .write() + .await .insert(signed_tx.hash, signed_tx.clone()); Ok(()) @@ -317,11 +423,11 @@ impl EthereumInterface for MockEthereum { ethabi::encode(params.into_tokens().as_ref()) } - fn sign_prepared_tx( + async fn sign_prepared_tx( &self, raw_tx: Vec, options: Options, - ) -> Result { + ) -> anyhow::Result { let gas_price = options.gas_price.unwrap_or(self.gas_price); let nonce = options.nonce.expect("Nonce must be set for every tx"); @@ -339,62 +445,47 @@ impl EthereumInterface for MockEthereum { hash, }) } + + async fn failure_reason(&self, _tx_hash: H256) -> Option { + None + } } /// Creates a default `ETHSender` with mock Ethereum connection/database and no operations in DB. /// Returns the `ETHSender` itself along with communication channels to interact with it. 
-pub(in crate::eth_sender) fn default_eth_sender() -> ( - ETHSender, - mpsc::Sender, - mpsc::Receiver, -) { - build_eth_sender(1, Vec::new(), Default::default()) +pub(in crate) async fn default_eth_sender() -> ETHSender { + build_eth_sender(1, Vec::new(), Default::default()).await } /// Creates an `ETHSender` with mock Ethereum connection/database and no operations in DB /// which supports multiple transactions in flight. /// Returns the `ETHSender` itself along with communication channels to interact with it. -pub(in crate::eth_sender) fn concurrent_eth_sender( +pub(in crate) async fn concurrent_eth_sender( max_txs_in_flight: u64, -) -> ( - ETHSender, - mpsc::Sender, - mpsc::Receiver, -) { - build_eth_sender(max_txs_in_flight, Vec::new(), Default::default()) +) -> ETHSender { + build_eth_sender(max_txs_in_flight, Vec::new(), Default::default()).await } /// Creates an `ETHSender` with mock Ethereum connection/database and restores its state "from DB". /// Returns the `ETHSender` itself along with communication channels to interact with it. -pub(in crate::eth_sender) fn restored_eth_sender( +pub(in crate) async fn restored_eth_sender( restore_state: impl IntoIterator, stats: ETHStats, -) -> ( - ETHSender, - mpsc::Sender, - mpsc::Receiver, -) { +) -> ETHSender { const MAX_TXS_IN_FLIGHT: u64 = 1; - build_eth_sender(MAX_TXS_IN_FLIGHT, restore_state, stats) + build_eth_sender(MAX_TXS_IN_FLIGHT, restore_state, stats).await } /// Helper method for configurable creation of `ETHSender`. 
-fn build_eth_sender( +async fn build_eth_sender( max_txs_in_flight: u64, restore_state: impl IntoIterator, stats: ETHStats, -) -> ( - ETHSender, - mpsc::Sender, - mpsc::Receiver, -) { +) -> ETHSender { let ethereum = MockEthereum::default(); let db = MockDatabase::with_restorable_state(restore_state, stats); - let (operation_sender, operation_receiver) = mpsc::channel(CHANNEL_CAPACITY); - let (notify_sender, notify_receiver) = mpsc::channel(CHANNEL_CAPACITY); - let options = EthSenderOptions { max_txs_in_flight, expected_wait_time_block: super::EXPECTED_WAIT_TIME_BLOCKS, @@ -403,23 +494,13 @@ fn build_eth_sender( is_enabled: true, }; - let current_zksync_info = CurrentZkSyncInfo::with_block_number(0); - let eth_sender = ETHSender::new( - options, - db, - ethereum, - operation_receiver, - notify_sender, - current_zksync_info, - ); - - (eth_sender, operation_sender, notify_receiver) + ETHSender::new(options, db, ethereum).await } /// Behaves the same as `ETHSender::sign_new_tx`, but does not affect nonce. /// This method should be used to create expected tx copies which won't affect /// the internal `ETHSender` state. -pub(in crate::eth_sender) fn create_signed_tx( +pub(in crate) async fn create_signed_tx( id: i64, eth_sender: ÐSender, operation: &Operation, @@ -433,6 +514,7 @@ pub(in crate::eth_sender) fn create_signed_tx( let signed_tx = eth_sender .ethereum .sign_prepared_tx(raw_tx.clone(), options) + .await .unwrap(); let op_type = match operation.action { @@ -455,7 +537,7 @@ pub(in crate::eth_sender) fn create_signed_tx( } /// Creates an `ETHOperation` object for a withdraw operation. 
-pub(in crate::eth_sender) fn create_signed_withdraw_tx( +pub(in crate) async fn create_signed_withdraw_tx( id: i64, eth_sender: ÐSender, deadline_block: u64, @@ -471,6 +553,7 @@ pub(in crate::eth_sender) fn create_signed_withdraw_tx( let signed_tx = eth_sender .ethereum .sign_prepared_tx(raw_tx.clone(), options) + .await .unwrap(); let op_type = OperationType::Withdraw; diff --git a/core/bin/zksync_eth_sender/src/tests/mod.rs b/core/bin/zksync_eth_sender/src/tests/mod.rs index bf81634e3c..69bee64f32 100644 --- a/core/bin/zksync_eth_sender/src/tests/mod.rs +++ b/core/bin/zksync_eth_sender/src/tests/mod.rs @@ -1,8 +1,3 @@ -// External uses -use tokio::runtime::Builder; -use tokio::time::timeout; -// Workspace uses -use zksync_types::ethereum::ETHOperation; // Local uses use self::mock::{ concurrent_eth_sender, create_signed_tx, create_signed_withdraw_tx, default_eth_sender, @@ -12,10 +7,6 @@ use super::{ transactions::{ETHStats, ExecutedTxStatus, TxCheckOutcome}, ETHSender, TxCheckMode, }; -use crate::eth_sender::ethereum_interface::EthereumInterface; -use crate::eth_sender::ETHSenderRequest; -use futures::executor::block_on; -use std::time::Duration; const EXPECTED_WAIT_TIME_BLOCKS: u64 = 30; const WAIT_CONFIRMATIONS: u64 = 1; @@ -23,38 +14,19 @@ const WAIT_CONFIRMATIONS: u64 = 1; pub mod mock; mod test_data; -fn retrieve_all_operations( - eth_sender: &mut ETHSender, -) { - async fn process_with_timeout( - eth_sender: &mut ETHSender, - ) { - timeout(Duration::from_secs(1), eth_sender.process_requests()) - .await - .unwrap_or_default() - } - - let mut runtime = Builder::new() - .basic_scheduler() - .enable_time() - .build() - .expect("Tokio runtime build"); - runtime.block_on(process_with_timeout(eth_sender)); -} - /// Basic test that `ETHSender` creation does not panic and initializes correctly. 
-#[test] -fn basic_test() { - let (eth_sender, _, _) = default_eth_sender(); +#[tokio::test] +async fn basic_test() { + let eth_sender = default_eth_sender().await; // Check that there are no unconfirmed operations by default. assert!(eth_sender.ongoing_ops.is_empty()); } /// Checks that deadline block is chosen according to the expected policy. -#[test] -fn deadline_block() { - let (eth_sender, _, _) = default_eth_sender(); +#[tokio::test] +async fn deadline_block() { + let eth_sender = default_eth_sender().await; assert_eq!(eth_sender.get_deadline_block(0), EXPECTED_WAIT_TIME_BLOCKS); assert_eq!( @@ -67,25 +39,32 @@ fn deadline_block() { /// `TxCheckOutcome` correctly. /// /// Here we check every possible output of the `check_transaction_state` method. -#[test] -fn transaction_state() { - let (mut eth_sender, _, _) = default_eth_sender(); +#[tokio::test] +async fn transaction_state() { + let mut eth_sender = default_eth_sender().await; let current_block = eth_sender.ethereum.block_number; let deadline_block = eth_sender.get_deadline_block(current_block); - let operations: Vec = vec![ + let operations = vec![ test_data::commit_operation(0), // Will be committed. test_data::commit_operation(1), // Will be pending because of not enough confirmations. test_data::commit_operation(2), // Will be failed. test_data::commit_operation(3), // Will be stuck. test_data::commit_operation(4), // Will be pending due no response. - ] - .iter() - .enumerate() - .map(|(eth_op_id, op)| { - let nonce = eth_op_id as i64; - create_signed_tx(eth_op_id as i64, ð_sender, op, deadline_block, nonce) - }) - .collect(); + ]; + let mut eth_operations = Vec::with_capacity(operations.len()); + + for (eth_op_id, op) in operations.iter().enumerate() { + eth_operations.push( + create_signed_tx( + eth_op_id as i64, + ð_sender, + op, + deadline_block, + eth_op_id as i64, + ) + .await, + ) + } // Committed operation. 
let committed_response = ExecutedTxStatus { @@ -95,7 +74,8 @@ fn transaction_state() { }; eth_sender .ethereum - .add_execution(&operations[0].used_tx_hashes[0], &committed_response); + .add_execution(ð_operations[0].used_tx_hashes[0], &committed_response) + .await; // Pending operation. let pending_response = ExecutedTxStatus { @@ -105,7 +85,8 @@ fn transaction_state() { }; eth_sender .ethereum - .add_execution(&operations[1].used_tx_hashes[0], &pending_response); + .add_execution(ð_operations[1].used_tx_hashes[0], &pending_response) + .await; // Failed operation. let failed_response = ExecutedTxStatus { @@ -115,19 +96,19 @@ fn transaction_state() { }; eth_sender .ethereum - .add_execution(&operations[2].used_tx_hashes[0], &failed_response); - - // Checks. + .add_execution(ð_operations[2].used_tx_hashes[0], &failed_response) + .await; // Committed operation. assert_eq!( eth_sender .check_transaction_state( TxCheckMode::Latest, - &operations[0], - &operations[0].used_tx_hashes[0], + ð_operations[0], + ð_operations[0].used_tx_hashes[0], current_block + committed_response.confirmations, ) + .await .unwrap(), TxCheckOutcome::Committed ); @@ -137,10 +118,11 @@ fn transaction_state() { eth_sender .check_transaction_state( TxCheckMode::Latest, - &operations[1], - &operations[1].used_tx_hashes[0], + ð_operations[1], + ð_operations[1].used_tx_hashes[0], current_block + pending_response.confirmations, ) + .await .unwrap(), TxCheckOutcome::Pending ); @@ -150,10 +132,11 @@ fn transaction_state() { eth_sender .check_transaction_state( TxCheckMode::Latest, - &operations[2], - &operations[2].used_tx_hashes[0], + ð_operations[2], + ð_operations[2].used_tx_hashes[0], current_block + failed_response.confirmations, ) + .await .unwrap(), TxCheckOutcome::Failed(Default::default()) ); @@ -163,10 +146,11 @@ fn transaction_state() { eth_sender .check_transaction_state( TxCheckMode::Latest, - &operations[3], - &operations[3].used_tx_hashes[0], + ð_operations[3], + 
ð_operations[3].used_tx_hashes[0], current_block + EXPECTED_WAIT_TIME_BLOCKS, ) + .await .unwrap(), TxCheckOutcome::Stuck ); @@ -176,10 +160,11 @@ fn transaction_state() { eth_sender .check_transaction_state( TxCheckMode::Latest, - &operations[4], - &operations[4].used_tx_hashes[0], + ð_operations[4], + ð_operations[4].used_tx_hashes[0], current_block + EXPECTED_WAIT_TIME_BLOCKS - 1, ) + .await .unwrap(), TxCheckOutcome::Pending ); @@ -189,10 +174,11 @@ fn transaction_state() { eth_sender .check_transaction_state( TxCheckMode::Old, - &operations[4], - &operations[4].used_tx_hashes[0], + ð_operations[4], + ð_operations[4].used_tx_hashes[0], current_block + EXPECTED_WAIT_TIME_BLOCKS - 1, ) + .await .unwrap(), TxCheckOutcome::Stuck ); @@ -203,9 +189,9 @@ fn transaction_state() { /// - they are successfully committed to the Ethereum; /// - `completeWithdrawals` tx is sent to the Ethereum; /// - notification is sent after `verify` operation is committed. -#[test] -fn operation_commitment_workflow() { - let (mut eth_sender, mut sender, mut receiver) = default_eth_sender(); +#[tokio::test] +async fn operation_commitment_workflow() { + let mut eth_sender = default_eth_sender().await; // In this test we will run one commit and one verify operation and should // obtain a notification about the operation being completed in the end. @@ -214,19 +200,20 @@ fn operation_commitment_workflow() { test_data::verify_operation(0), ]; - let verify_operation_id = operations[1].id; - for (eth_op_id, operation) in operations.iter().enumerate() { let nonce = eth_op_id as i64; // Send an operation to `ETHSender`. - sender - .try_send(ETHSenderRequest::SendOperation(operation.clone())) + eth_sender + .db + .send_operation(operation.clone()) + .await .unwrap(); // Retrieve it there and then process. 
- retrieve_all_operations(&mut eth_sender); - block_on(eth_sender.proceed_next_operations()); + eth_sender.load_new_operations().await; + + eth_sender.proceed_next_operations().await; // Now we should see that transaction is stored in the database and sent to the Ethereum. let deadline_block = eth_sender.get_deadline_block(eth_sender.ethereum.block_number); @@ -236,60 +223,57 @@ fn operation_commitment_workflow() { operation, deadline_block, nonce, - ); + ) + .await; expected_tx.id = eth_op_id as i64; // We have to set the ID manually. - eth_sender.db.assert_stored(&expected_tx); + eth_sender.db.assert_stored(&expected_tx).await; + eth_sender .ethereum - .assert_sent(&expected_tx.used_tx_hashes[0]); - - // No confirmation should be done yet. - assert!(receiver.try_next().is_err()); + .assert_sent(&expected_tx.used_tx_hashes[0]) + .await; // Increment block, make the transaction look successfully executed, and process the // operation again. eth_sender .ethereum - .add_successfull_execution(expected_tx.used_tx_hashes[0], WAIT_CONFIRMATIONS); - block_on(eth_sender.proceed_next_operations()); + .add_successfull_execution(expected_tx.used_tx_hashes[0], WAIT_CONFIRMATIONS) + .await; + + eth_sender.proceed_next_operations().await; // Check that operation is confirmed. expected_tx.confirmed = true; expected_tx.final_hash = Some(expected_tx.used_tx_hashes[0]); - eth_sender.db.assert_confirmed(&expected_tx); + eth_sender.db.assert_confirmed(&expected_tx).await; } - // Process the next operation and check that `completeWithdrawals` transaction is stored and sent. 
- block_on(eth_sender.proceed_next_operations()); + eth_sender.proceed_next_operations().await; let eth_op_idx = operations.len() as i64; let nonce = eth_op_idx; let deadline_block = eth_sender.get_deadline_block(eth_sender.ethereum.block_number); let mut withdraw_op_tx = - create_signed_withdraw_tx(eth_op_idx, ð_sender, deadline_block, nonce); + create_signed_withdraw_tx(eth_op_idx, ð_sender, deadline_block, nonce).await; - eth_sender.db.assert_stored(&withdraw_op_tx); + eth_sender.db.assert_stored(&withdraw_op_tx).await; eth_sender .ethereum - .assert_sent(&withdraw_op_tx.used_tx_hashes[0]); + .assert_sent(&withdraw_op_tx.used_tx_hashes[0]) + .await; // Mark `completeWithdrawals` as completed. eth_sender .ethereum - .add_successfull_execution(withdraw_op_tx.used_tx_hashes[0], WAIT_CONFIRMATIONS); - block_on(eth_sender.proceed_next_operations()); + .add_successfull_execution(withdraw_op_tx.used_tx_hashes[0], WAIT_CONFIRMATIONS) + .await; + eth_sender.proceed_next_operations().await; // Check that `completeWithdrawals` is completed in the DB. withdraw_op_tx.confirmed = true; withdraw_op_tx.final_hash = Some(withdraw_op_tx.used_tx_hashes[0]); - eth_sender.db.assert_confirmed(&withdraw_op_tx); - - // We should be notified about verify operation being completed. - assert_eq!( - receiver.try_next().unwrap().unwrap().id, - verify_operation_id - ); + eth_sender.db.assert_confirmed(&withdraw_op_tx).await; } /// A simple scenario for a stuck transaction: @@ -297,27 +281,31 @@ fn operation_commitment_workflow() { /// - It is not processed after some blocks. /// - `ETHSender` creates a new transaction with increased gas. /// - This transaction is completed successfully. -#[test] -fn stuck_transaction() { - let (mut eth_sender, mut sender, _) = default_eth_sender(); +#[tokio::test] +async fn stuck_transaction() { + let mut eth_sender = default_eth_sender().await; // Workflow for the test is similar to `operation_commitment_workflow`. 
let operation = test_data::commit_operation(0);
- sender
- .try_send(ETHSenderRequest::SendOperation(operation.clone()))
+ // Send an operation to `ETHSender`.
+ eth_sender
+ .db
+ .send_operation(operation.clone())
+ .await
.unwrap();
- retrieve_all_operations(&mut eth_sender);
- block_on(eth_sender.proceed_next_operations());
+ eth_sender.load_new_operations().await;
+ eth_sender.proceed_next_operations().await;
let eth_op_id = 0;
let nonce = 0;
let deadline_block = eth_sender.get_deadline_block(eth_sender.ethereum.block_number);
- let mut stuck_tx = create_signed_tx(eth_op_id, &eth_sender, &operation, deadline_block, nonce);
+ let mut stuck_tx =
+ create_signed_tx(eth_op_id, &eth_sender, &operation, deadline_block, nonce).await;
// Skip some blocks and expect sender to send a new tx.
eth_sender.ethereum.block_number += EXPECTED_WAIT_TIME_BLOCKS;
- block_on(eth_sender.proceed_next_operations());
+ eth_sender.proceed_next_operations().await;
// Check that new transaction is sent (and created based on the previous stuck tx).
let expected_sent_tx = eth_sender
@@ -325,21 +313,26 @@ fn stuck_transaction() {
eth_sender.get_deadline_block(eth_sender.ethereum.block_number),
&mut stuck_tx,
)
+ .await
.unwrap();
- eth_sender.db.assert_stored(&stuck_tx);
- eth_sender.ethereum.assert_sent(&expected_sent_tx.hash);
+ eth_sender.db.assert_stored(&stuck_tx).await;
+ eth_sender
+ .ethereum
+ .assert_sent(&expected_sent_tx.hash)
+ .await;
// Increment block, make the transaction look successfully executed, and process the
// operation again.
eth_sender
.ethereum
- .add_successfull_execution(stuck_tx.used_tx_hashes[1], WAIT_CONFIRMATIONS);
- block_on(eth_sender.proceed_next_operations());
+ .add_successfull_execution(stuck_tx.used_tx_hashes[1], WAIT_CONFIRMATIONS)
+ .await;
+ eth_sender.proceed_next_operations().await;
// Check that operation is confirmed (we set the final hash to the second sent tx). 
stuck_tx.confirmed = true;
stuck_tx.final_hash = Some(stuck_tx.used_tx_hashes[1]);
- eth_sender.db.assert_confirmed(&stuck_tx);
+ eth_sender.db.assert_confirmed(&stuck_tx).await;
}
/// This test verifies that with multiple operations received all-together,
@@ -347,9 +340,9 @@ fn stuck_transaction() {
/// the previous one is committed.
///
/// This test includes all three operation types (commit, verify and withdraw).
-#[test]
-fn operations_order() {
- let (mut eth_sender, mut sender, mut receiver) = default_eth_sender();
+#[tokio::test]
+async fn operations_order() {
+ let mut eth_sender = default_eth_sender().await;
// We send multiple the operations at once to the channel.
let operations_count = 3;
@@ -382,7 +375,8 @@ fn operations_order() {
commit_operation,
deadline_block,
nonce,
- );
+ )
+ .await;
expected_txs.push(commit_op_tx);
@@ -398,7 +392,8 @@ fn operations_order() {
verify_operation,
deadline_block,
nonce,
- );
+ )
+ .await;
expected_txs.push(verify_op_tx);
@@ -409,88 +404,90 @@ fn operations_order() {
let nonce = eth_op_idx;
let withdraw_op_tx =
- create_signed_withdraw_tx(eth_op_idx, &eth_sender, deadline_block, nonce);
+ create_signed_withdraw_tx(eth_op_idx, &eth_sender, deadline_block, nonce).await;
expected_txs.push(withdraw_op_tx);
}
for operation in operations.iter() {
- sender
- .try_send(ETHSenderRequest::SendOperation(operation.clone()))
+ eth_sender
+ .db
+ .send_operation(operation.clone())
+ .await
.unwrap();
}
- retrieve_all_operations(&mut eth_sender);
+ eth_sender.load_new_operations().await;
// Then we go through the operations and check that the order of operations is preserved.
for mut tx in expected_txs.into_iter() {
let current_tx_hash = tx.used_tx_hashes[0];
- block_on(eth_sender.proceed_next_operations());
+ eth_sender.proceed_next_operations().await;
// Check that current expected tx is stored. 
- eth_sender.db.assert_stored(&tx);
- eth_sender.ethereum.assert_sent(&current_tx_hash);
+ eth_sender.db.assert_stored(&tx).await;
+ eth_sender.ethereum.assert_sent(&current_tx_hash).await;
// Mark the tx as successfully
eth_sender
.ethereum
- .add_successfull_execution(current_tx_hash, WAIT_CONFIRMATIONS);
- block_on(eth_sender.proceed_next_operations());
+ .add_successfull_execution(current_tx_hash, WAIT_CONFIRMATIONS)
+ .await;
+ eth_sender.proceed_next_operations().await;
// Update the fields in the tx and check if it's confirmed.
tx.confirmed = true;
tx.final_hash = Some(current_tx_hash);
- eth_sender.db.assert_confirmed(&tx);
- }
-
- // We should be notified about all the verify operations being completed.
- for _ in 0..operations_count {
- assert!(receiver.try_next().unwrap().is_some());
+ eth_sender.db.assert_confirmed(&tx).await;
}
}
/// Check that upon a transaction failure the incident causes a panic by default.
-#[test]
+#[tokio::test]
#[should_panic(expected = "Cannot operate after unexpected TX failure")]
-fn transaction_failure() {
- let (mut eth_sender, mut sender, _) = default_eth_sender();
+async fn transaction_failure() {
+ let mut eth_sender = default_eth_sender().await;
// Workflow for the test is similar to `operation_commitment_workflow`. 
let operation = test_data::commit_operation(0);
- sender
- .try_send(ETHSenderRequest::SendOperation(operation.clone()))
+ eth_sender
+ .db
+ .send_operation(operation.clone())
+ .await
.unwrap();
let eth_op_id = 0;
let nonce = 0;
let deadline_block = eth_sender.get_deadline_block(eth_sender.ethereum.block_number);
- let failing_tx = create_signed_tx(eth_op_id, &eth_sender, &operation, deadline_block, nonce);
+ let failing_tx =
+ create_signed_tx(eth_op_id, &eth_sender, &operation, deadline_block, nonce).await;
- retrieve_all_operations(&mut eth_sender);
- block_on(eth_sender.proceed_next_operations());
+ eth_sender.load_new_operations().await;
+ eth_sender.proceed_next_operations().await;
eth_sender
.ethereum
- .add_failed_execution(&failing_tx.used_tx_hashes[0], WAIT_CONFIRMATIONS);
- block_on(eth_sender.proceed_next_operations());
+ .add_failed_execution(&failing_tx.used_tx_hashes[0], WAIT_CONFIRMATIONS)
+ .await;
+ eth_sender.proceed_next_operations().await;
}
/// Check that after recovering state with several non-processed operations
/// they will be processed normally.
-#[test]
-fn restore_state() {
+#[tokio::test]
+async fn restore_state() {
let (operations, stored_operations) = {
// This `eth_sender` is required to generate the input only. 
- let (eth_sender, _, _) = default_eth_sender();
+ let eth_sender = default_eth_sender().await;
let commit_op = test_data::commit_operation(0);
let verify_op = test_data::verify_operation(0);
let deadline_block = eth_sender.get_deadline_block(1);
- let commit_op_tx = create_signed_tx(0, &eth_sender, &commit_op, deadline_block, 0);
+ let commit_op_tx = create_signed_tx(0, &eth_sender, &commit_op, deadline_block, 0).await;
let deadline_block = eth_sender.get_deadline_block(2);
- let verify_op_tx = create_signed_tx(1, &eth_sender, &verify_op, deadline_block, 1);
+ let verify_op_tx = create_signed_tx(1, &eth_sender, &verify_op, deadline_block, 1).await;
let operations = vec![commit_op, verify_op];
let stored_operations = vec![commit_op_tx, verify_op_tx];
@@ -503,14 +500,14 @@ fn restore_state() {
verify_ops: 1,
withdraw_ops: 0,
};
- let (mut eth_sender, _, mut receiver) = restored_eth_sender(stored_operations, stats);
+ let mut eth_sender = restored_eth_sender(stored_operations, stats).await;
for (eth_op_id, operation) in operations.iter().enumerate() {
// Note that we DO NOT send an operation to `ETHSender` and neither receive it.
// We do process operations restored from the DB though.
// The rest of this test is the same as in `operation_commitment_workflow`. 
- block_on(eth_sender.proceed_next_operations()); + eth_sender.proceed_next_operations().await; let deadline_block = eth_sender.get_deadline_block(eth_sender.ethereum.block_number); let nonce = eth_op_id as i64; @@ -520,77 +517,82 @@ fn restore_state() { operation, deadline_block, nonce, - ); + ) + .await; expected_tx.id = eth_op_id as i64; - eth_sender.db.assert_stored(&expected_tx); + eth_sender.db.assert_stored(&expected_tx).await; eth_sender .ethereum - .add_successfull_execution(expected_tx.used_tx_hashes[0], WAIT_CONFIRMATIONS); - block_on(eth_sender.proceed_next_operations()); + .add_successfull_execution(expected_tx.used_tx_hashes[0], WAIT_CONFIRMATIONS) + .await; + eth_sender.proceed_next_operations().await; expected_tx.confirmed = true; expected_tx.final_hash = Some(expected_tx.used_tx_hashes[0]); - eth_sender.db.assert_confirmed(&expected_tx); + eth_sender.db.assert_confirmed(&expected_tx).await; } - - assert!(receiver.try_next().unwrap().is_some()); } /// Checks that even after getting the first transaction stuck and sending the next /// one, confirmation for the first (stuck) transaction is processed and leads /// to the operation commitment. -#[test] -fn confirmations_independence() { +#[tokio::test] +async fn confirmations_independence() { // Workflow in the test is the same as in `stuck_transaction`, except for the fact // that confirmation is obtained for the stuck transaction instead of the latter one. 
- let (mut eth_sender, mut sender, _) = default_eth_sender();
+ let mut eth_sender = default_eth_sender().await;
let operation = test_data::commit_operation(0);
- sender
- .try_send(ETHSenderRequest::SendOperation(operation.clone()))
+ eth_sender
+ .db
+ .send_operation(operation.clone())
+ .await
.unwrap();
- retrieve_all_operations(&mut eth_sender);
- block_on(eth_sender.proceed_next_operations());
+ eth_sender.load_new_operations().await;
+ eth_sender.proceed_next_operations().await;
let eth_op_id = 0;
let nonce = 0;
let deadline_block = eth_sender.get_deadline_block(eth_sender.ethereum.block_number);
- let mut stuck_tx = create_signed_tx(eth_op_id, &eth_sender, &operation, deadline_block, nonce);
+ let mut stuck_tx =
+ create_signed_tx(eth_op_id, &eth_sender, &operation, deadline_block, nonce).await;
eth_sender.ethereum.block_number += EXPECTED_WAIT_TIME_BLOCKS;
- block_on(eth_sender.proceed_next_operations());
+ eth_sender.proceed_next_operations().await;
let next_tx = eth_sender
.create_supplement_tx(
eth_sender.get_deadline_block(eth_sender.ethereum.block_number),
&mut stuck_tx,
)
+ .await
.unwrap();
- eth_sender.db.assert_stored(&stuck_tx);
- eth_sender.ethereum.assert_sent(&next_tx.hash);
+ eth_sender.db.assert_stored(&stuck_tx).await;
+ eth_sender.ethereum.assert_sent(&next_tx.hash).await;
// Add a confirmation for a *stuck* transaction.
eth_sender
.ethereum
- .add_successfull_execution(stuck_tx.used_tx_hashes[0], WAIT_CONFIRMATIONS);
- block_on(eth_sender.proceed_next_operations());
+ .add_successfull_execution(stuck_tx.used_tx_hashes[0], WAIT_CONFIRMATIONS)
+ .await;
+ eth_sender.proceed_next_operations().await;
// Check that operation is confirmed (we set the final hash to the *first* sent tx). 
stuck_tx.confirmed = true;
stuck_tx.final_hash = Some(stuck_tx.used_tx_hashes[0]);
- eth_sender.db.assert_confirmed(&stuck_tx);
+ eth_sender.db.assert_confirmed(&stuck_tx).await;
}
/// This test is the same as `operations_order`, but configures ETH sender
/// to use 3 transactions in flight, and checks that they are being sent concurrently.
-#[test]
-fn concurrent_operations_order() {
+#[tokio::test]
+async fn concurrent_operations_order() {
const MAX_TXS_IN_FLIGHT: u64 = 3;
- let (mut eth_sender, mut sender, mut receiver) = concurrent_eth_sender(MAX_TXS_IN_FLIGHT);
+ let mut eth_sender = concurrent_eth_sender(MAX_TXS_IN_FLIGHT).await;
// We send multiple the operations at once to the channel.
let operations_count = 3;
@@ -605,6 +607,7 @@ fn concurrent_operations_order() {
// the logic of ID calculating is (i * 3), (i * 3 + 1), (i * 3 + 2).
// On the first iteration the indices 0, 1 and 2 will be taken, then it
// will be 3, 4 and 5, etc.
+
for (idx, (commit_operation, verify_operation)) in
commit_operations.iter().zip(verify_operations).enumerate()
{
@@ -625,7 +628,8 @@ fn concurrent_operations_order() {
commit_operation,
deadline_block,
nonce,
- );
+ )
+ .await;
expected_txs.push(commit_op_tx);
@@ -639,7 +643,8 @@ fn concurrent_operations_order() {
verify_operation,
deadline_block,
nonce,
- );
+ )
+ .await;
expected_txs.push(verify_op_tx);
@@ -650,11 +655,10 @@ fn concurrent_operations_order() {
let nonce = eth_op_idx;
let withdraw_op_tx =
- create_signed_withdraw_tx(eth_op_idx, &eth_sender, deadline_block, nonce);
+ create_signed_withdraw_tx(eth_op_idx, &eth_sender, deadline_block, nonce).await;
expected_txs.push(withdraw_op_tx);
}
-
// Pair commit/verify operations.
let mut operations_iter = commit_operations.iter().zip(verify_operations);
@@ -665,16 +669,22 @@ fn concurrent_operations_order() {
// If we'll send all the operations together, the order will be "commit-verify-commit-verify-withdraw",
// since withdraw is only sent after verify operation is confirmed. 
let (commit_op, verify_op) = operations_iter.next().unwrap();
- sender
- .try_send(ETHSenderRequest::SendOperation(commit_op.clone()))
+ println!("BEGIN");
+ eth_sender
+ .db
+ .send_operation(commit_op.clone())
+ .await
.unwrap();
- sender
- .try_send(ETHSenderRequest::SendOperation(verify_op.clone()))
+ eth_sender
+ .db
+ .send_operation(verify_op.clone())
+ .await
.unwrap();
- retrieve_all_operations(&mut eth_sender);
+ println!("END");
+ eth_sender.load_new_operations().await;
// Call `proceed_next_operations`. Several txs should be sent.
- block_on(eth_sender.proceed_next_operations());
+ eth_sender.proceed_next_operations().await;
let commit_tx = &txs[0];
let verify_tx = &txs[1];
@@ -685,17 +695,18 @@ fn concurrent_operations_order() {
let current_tx_hash = tx.used_tx_hashes[0];
// Check that current expected tx is stored.
- eth_sender.db.assert_stored(&tx);
- eth_sender.ethereum.assert_sent(&current_tx_hash);
+ eth_sender.db.assert_stored(&tx).await;
+ eth_sender.ethereum.assert_sent(&current_tx_hash).await;
// Mark the tx as successfully
eth_sender
.ethereum
- .add_successfull_execution(current_tx_hash, WAIT_CONFIRMATIONS);
+ .add_successfull_execution(current_tx_hash, WAIT_CONFIRMATIONS)
+ .await;
}
// Call `proceed_next_operations` again. Both txs should become confirmed.
- block_on(eth_sender.proceed_next_operations());
+ eth_sender.proceed_next_operations().await;
for &tx in &[commit_tx, verify_tx] {
let mut tx = tx.clone();
@@ -704,32 +715,28 @@ fn concurrent_operations_order() {
// Update the fields in the tx and check if it's confirmed.
tx.confirmed = true;
tx.final_hash = Some(current_tx_hash);
- eth_sender.db.assert_confirmed(&tx);
+ eth_sender.db.assert_confirmed(&tx).await;
}
// Now, the withdraw operation should be taken from the queue, and
// sent to the Ethereum. 
- block_on(eth_sender.proceed_next_operations()); + eth_sender.proceed_next_operations().await; let withdraw_tx_hash = withdraw_tx.used_tx_hashes[0]; - eth_sender.db.assert_stored(&withdraw_tx); - eth_sender.ethereum.assert_sent(&withdraw_tx_hash); + eth_sender.db.assert_stored(&withdraw_tx).await; + eth_sender.ethereum.assert_sent(&withdraw_tx_hash).await; // Mark the tx as successfully eth_sender .ethereum - .add_successfull_execution(withdraw_tx_hash, WAIT_CONFIRMATIONS); + .add_successfull_execution(withdraw_tx_hash, WAIT_CONFIRMATIONS) + .await; // Call `proceed_next_operations` again. Withdraw tx should become confirmed. - block_on(eth_sender.proceed_next_operations()); + eth_sender.proceed_next_operations().await; // Update the fields in the tx and check if it's confirmed. withdraw_tx.confirmed = true; withdraw_tx.final_hash = Some(withdraw_tx_hash); - eth_sender.db.assert_confirmed(&withdraw_tx); - } - - // We should be notified about all the verify operations being completed. - for _ in 0..operations_count { - assert!(receiver.try_next().unwrap().is_some()); + eth_sender.db.assert_confirmed(&withdraw_tx).await; } } diff --git a/core/bin/zksync_eth_sender/src/tests/test_data.rs b/core/bin/zksync_eth_sender/src/tests/test_data.rs index 79251b68b7..33e5e8c2ec 100644 --- a/core/bin/zksync_eth_sender/src/tests/test_data.rs +++ b/core/bin/zksync_eth_sender/src/tests/test_data.rs @@ -49,7 +49,6 @@ fn get_operation(id: i64, block_number: u32, action: Action) -> Operation { 1_000_000.into(), 1_500_000.into(), ), - accounts_updated: Vec::new(), } } diff --git a/core/bin/zksync_prometheus_exporter/src/main.rs b/core/bin/zksync_prometheus_exporter/src/main.rs index f410378150..d6c0c52dd7 100644 --- a/core/bin/zksync_prometheus_exporter/src/main.rs +++ b/core/bin/zksync_prometheus_exporter/src/main.rs @@ -22,7 +22,7 @@ async fn main() -> anyhow::Result<()> { .expect("Error setting Ctrl-C handler"); } - let connection_pool = 
ConnectionPool::new(Some(PROMETHEUS_EXPORTER_CONNECTION_POOL_SIZE)).await; + let connection_pool = ConnectionPool::new(Some(PROMETHEUS_EXPORTER_CONNECTION_POOL_SIZE)); let config_options = ConfigurationOptions::from_env(); let task_handle = run_prometheus_exporter(connection_pool, &config_options); diff --git a/core/bin/zksync_witness_generator/src/main.rs b/core/bin/zksync_witness_generator/src/main.rs index 88113a4f78..82a560ecff 100644 --- a/core/bin/zksync_witness_generator/src/main.rs +++ b/core/bin/zksync_witness_generator/src/main.rs @@ -22,7 +22,7 @@ async fn main() -> anyhow::Result<()> { .expect("Error setting Ctrl-C handler"); } - let connection_pool = ConnectionPool::new(Some(WITNESS_GENERATOR_CONNECTION_POOL_SIZE)).await; + let connection_pool = ConnectionPool::new(Some(WITNESS_GENERATOR_CONNECTION_POOL_SIZE)); let config_options = ConfigurationOptions::from_env(); let prover_options = ProverOptions::from_env(); diff --git a/core/bin/zksync_witness_generator/tests/prover_server.rs b/core/bin/zksync_witness_generator/tests/prover_server.rs index 0e24458643..d5c7b40a67 100644 --- a/core/bin/zksync_witness_generator/tests/prover_server.rs +++ b/core/bin/zksync_witness_generator/tests/prover_server.rs @@ -15,7 +15,7 @@ use zksync_circuit::witness::utils::get_used_subtree_root_hash; use zksync_witness_generator::run_prover_server; async fn connect_to_db() -> zksync_storage::ConnectionPool { - zksync_storage::ConnectionPool::new(Some(1)).await + zksync_storage::ConnectionPool::new(Some(1)) } async fn spawn_server(prover_timeout: time::Duration, rounds_interval: time::Duration) -> String { diff --git a/core/lib/storage/Cargo.toml b/core/lib/storage/Cargo.toml index ce8da24e92..90e7e8f7d2 100644 --- a/core/lib/storage/Cargo.toml +++ b/core/lib/storage/Cargo.toml @@ -27,6 +27,7 @@ log = "0.4" anyhow = "1.0" itertools = "0.8" hex = "0.4" +parity-crypto = {version = "0.6.2", features = ["publickey"] } async-trait = "0.1" deadpool = "0.5.2" @@ -38,5 +39,4 @@ 
zksync_config = { path = "../config", version = "1.0" } db_test_macro = { path = "./db_test_macro" } env_logger = "0.6" -parity-crypto = {version = "0.6.2", features = ["publickey"] } tokio = { version = "0.2", features = ["full"] } diff --git a/core/lib/storage/sqlx-data.json b/core/lib/storage/sqlx-data.json index 04258133a3..e57814d612 100644 --- a/core/lib/storage/sqlx-data.json +++ b/core/lib/storage/sqlx-data.json @@ -3271,6 +3271,22 @@ ] } }, + "ec815cee37d8ac3557b523521a6bee44c7e8d949309e7dd9b0d0364edd2e85e9": { + "query": "INSERT INTO eth_parameters (nonce, gas_price_limit, commit_ops, verify_ops, withdraw_ops)\n VALUES ($1, $2, $3, $4, $5)", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Int8", + "Int8", + "Int8" + ] + }, + "nullable": [] + } + }, "ede4a4f728f00df1e8149f67bf3b3f1bdeb9733165163ac45338ed6fec037505": { "query": "SELECT COUNT(*) as \"count!\" FROM pending_withdrawals", "describe": { diff --git a/core/lib/storage/src/chain/block/records.rs b/core/lib/storage/src/chain/block/records.rs index 928b1b022e..a1a42b6180 100644 --- a/core/lib/storage/src/chain/block/records.rs +++ b/core/lib/storage/src/chain/block/records.rs @@ -47,7 +47,7 @@ pub struct BlockDetails { pub verified_at: Option>, } -#[derive(Debug, Serialize, Deserialize, FromRow)] +#[derive(Debug, Serialize, Deserialize, FromRow, PartialEq)] pub struct BlockTransactionItem { pub tx_hash: String, pub block_number: i64, @@ -62,3 +62,14 @@ pub struct AccountTreeCache { pub block: i64, pub tree_cache: String, } + +impl BlockDetails { + /// Checks if block is finalized, meaning that + /// both Verify operation is performed for it, and this + /// operation is anchored on the Ethereum blockchain. + pub fn is_verified(&self) -> bool { + // We assume that it's not possible to have block that is + // verified and not committed. 
+ self.verified_at.is_some() && self.verify_tx_hash.is_some()
+ }
+}
diff --git a/core/lib/storage/src/connection/mod.rs b/core/lib/storage/src/connection/mod.rs
index dd60ee5211..bdcf7f316f 100644
--- a/core/lib/storage/src/connection/mod.rs
+++ b/core/lib/storage/src/connection/mod.rs
@@ -61,7 +61,7 @@ impl ConnectionPool {
/// Establishes a pool of the connections to the database and
/// creates a new `ConnectionPool` object.
/// pool_max_size - number of connections in pool, if not set env variable "DB_POOL_SIZE" is going to be used.
- pub async fn new(pool_max_size: Option) -> Self {
+ pub fn new(pool_max_size: Option) -> Self {
let database_url = Self::get_database_url();
let max_size = pool_max_size.unwrap_or_else(|| parse_env("DB_POOL_SIZE"));
diff --git a/core/lib/storage/src/ethereum/mod.rs b/core/lib/storage/src/ethereum/mod.rs
index 884095bc1e..58ce448ec2 100644
--- a/core/lib/storage/src/ethereum/mod.rs
+++ b/core/lib/storage/src/ethereum/mod.rs
@@ -448,7 +448,7 @@ impl<'a, 'c> EthereumSchema<'a, 'c> {
/// Method that internally initializes the `eth_parameters` table.
/// Since in db tests the database is empty, we must provide a possibility
/// to initialize required db fields.
- #[cfg(test)]
+ #[doc(hidden)]
pub async fn initialize_eth_data(&mut self) -> QueryResult<()> {
#[derive(Debug)]
pub struct NewETHParams {
diff --git a/core/lib/storage/src/lib.rs b/core/lib/storage/src/lib.rs
index ebd4c37c27..d7defa33ea 100644
--- a/core/lib/storage/src/lib.rs
+++ b/core/lib/storage/src/lib.rs
@@ -91,6 +91,7 @@ pub mod data_restore;
pub mod diff;
pub mod ethereum;
pub mod prover;
+pub mod test_data;
pub mod tokens;
pub mod utils;
diff --git a/core/lib/storage/src/test_data.rs b/core/lib/storage/src/test_data.rs
new file mode 100644
index 0000000000..83ff355dd0
--- /dev/null
+++ b/core/lib/storage/src/test_data.rs
@@ -0,0 +1,157 @@
+//! Utilities used to generate test data for tests that involve database interaction. 
+ +// Built-in uses +use std::ops::Deref; + +// External imports +use num::BigUint; + +use parity_crypto::publickey::{Generator, Random}; +// Workspace imports +use zksync_crypto::{ff::PrimeField, rand::Rng, Fr}; +use zksync_types::{ + account::Account, + tx::{EthSignData, PackedEthSignature, TxEthSignature}, + Action, Address, Operation, H256, + { + block::{Block, ExecutedOperations}, + AccountUpdate, BlockNumber, PubKeyHash, + }, +}; +// Local imports + +/// Block size used for tests +pub const BLOCK_SIZE_CHUNKS: usize = 100; + +/// Generates a random account with a set of changes. +pub fn gen_acc_random_updates(rng: &mut R) -> impl Iterator { + let id: u32 = rng.gen(); + let balance = u128::from(rng.gen::()); + let nonce: u32 = rng.gen(); + let pub_key_hash = PubKeyHash { data: rng.gen() }; + let address: Address = rng.gen::<[u8; 20]>().into(); + + let mut a = Account::default_with_address(&address); + let old_nonce = nonce; + a.nonce = old_nonce + 2; + a.pub_key_hash = pub_key_hash; + + let old_balance = a.get_balance(0); + a.set_balance(0, BigUint::from(balance)); + let new_balance = a.get_balance(0); + vec![ + ( + id, + AccountUpdate::Create { + nonce: old_nonce, + address: a.address, + }, + ), + ( + id, + AccountUpdate::ChangePubKeyHash { + old_nonce, + old_pub_key_hash: PubKeyHash::default(), + new_nonce: old_nonce + 1, + new_pub_key_hash: a.pub_key_hash, + }, + ), + ( + id, + AccountUpdate::UpdateBalance { + old_nonce: old_nonce + 1, + new_nonce: old_nonce + 2, + balance_update: (0, old_balance, new_balance), + }, + ), + ] + .into_iter() +} + +/// Generates dummy operation with the default `new_root_hash` in the block. +pub fn gen_operation( + block_number: BlockNumber, + action: Action, + block_chunks_size: usize, +) -> Operation { + gen_operation_with_txs(block_number, action, block_chunks_size, vec![]) +} + +/// Generates dummy operation with the default `new_root_hash` in the block and given set of transactions. 
+pub fn gen_operation_with_txs( + block_number: BlockNumber, + action: Action, + block_chunks_size: usize, + txs: Vec, +) -> Operation { + Operation { + id: None, + action, + block: Block { + block_number, + new_root_hash: Fr::default(), + fee_account: 0, + block_transactions: txs, + processed_priority_ops: (0, 0), + block_chunks_size, + commit_gas_limit: 1_000_000.into(), + verify_gas_limit: 1_500_000.into(), + }, + } +} + +/// Generates EthSignData for testing (not a valid signature) +pub fn gen_eth_sing_data(message: String) -> EthSignData { + let keypair = Random.generate(); + let private_key = keypair.secret(); + + let signature = PackedEthSignature::sign(private_key.deref(), &message.as_bytes()).unwrap(); + + EthSignData { + signature: TxEthSignature::EthereumSignature(signature), + message: message.into_bytes(), + } +} + +/// Creates a dummy new root hash for the block based on its number. +pub fn dummy_root_hash_for_block(block_number: BlockNumber) -> Fr { + Fr::from_str(&block_number.to_string()).unwrap() +} + +/// Creates a dummy ethereum operation hash based on its number. +pub fn dummy_ethereum_tx_hash(ethereum_op_id: i64) -> H256 { + H256::from_low_u64_ne(ethereum_op_id as u64) +} + +/// Generates dummy operation with the unique `new_root_hash` in the block. +pub fn gen_unique_operation( + block_number: BlockNumber, + action: Action, + block_chunks_size: usize, +) -> Operation { + gen_unique_operation_with_txs(block_number, action, block_chunks_size, vec![]) +} + +/// Generates dummy operation with the unique `new_root_hash` in the block and +/// given set of transactions.. 
+pub fn gen_unique_operation_with_txs( + block_number: BlockNumber, + action: Action, + block_chunks_size: usize, + txs: Vec, +) -> Operation { + Operation { + id: None, + action, + block: Block { + block_number, + new_root_hash: dummy_root_hash_for_block(block_number), + fee_account: 0, + block_transactions: txs, + processed_priority_ops: (0, 0), + block_chunks_size, + commit_gas_limit: 1_000_000.into(), + verify_gas_limit: 1_500_000.into(), + }, + } +} diff --git a/core/lib/storage/src/tests/chain/block.rs b/core/lib/storage/src/tests/chain/block.rs index 208e825a0b..0b5adc415c 100644 --- a/core/lib/storage/src/tests/chain/block.rs +++ b/core/lib/storage/src/tests/chain/block.rs @@ -1,13 +1,13 @@ // External imports -use zksync_basic_types::H256; + // Workspace imports -use zksync_crypto::{convert::FeConvert, Fr}; -use zksync_crypto::{ff::PrimeField, rand::XorShiftRng}; -use zksync_types::{block::Block, helpers::apply_updates, AccountMap, AccountUpdate, BlockNumber}; -use zksync_types::{ethereum::OperationType, Action, Operation}; +use zksync_crypto::{convert::FeConvert, rand::XorShiftRng}; +use zksync_types::{ + ethereum::OperationType, helpers::apply_updates, AccountMap, AccountUpdate, AccountUpdates, + Action, BlockNumber, +}; // Local imports -use super::utils::{acc_create_random_updates, get_operation, get_operation_with_txs}; -use crate::tests::{create_rng, db_test}; +use super::utils::{get_operation, get_operation_with_txs}; use crate::{ chain::{ block::{records::BlockDetails, BlockSchema}, @@ -15,12 +15,13 @@ use crate::{ }, ethereum::EthereumSchema, prover::ProverSchema, + test_data::{ + dummy_ethereum_tx_hash, gen_acc_random_updates, gen_unique_operation, BLOCK_SIZE_CHUNKS, + }, + tests::{create_rng, db_test}, QueryResult, StorageProcessor, }; -/// block size used for this tests -const BLOCK_SIZE_CHUNKS: usize = 100; - /// Creates several random updates for the provided account map, /// and returns the resulting account map together with the list /// of 
generated updates. @@ -28,13 +29,10 @@ pub fn apply_random_updates( mut accounts: AccountMap, rng: &mut XorShiftRng, ) -> (AccountMap, Vec<(u32, AccountUpdate)>) { - let updates = { - let mut updates = Vec::new(); - updates.extend(acc_create_random_updates(rng)); - updates.extend(acc_create_random_updates(rng)); - updates.extend(acc_create_random_updates(rng)); - updates - }; + let updates = (0..3) + .map(|_| gen_acc_random_updates(rng)) + .flatten() + .collect::(); apply_updates(&mut accounts, updates.clone()); (accounts, updates) } @@ -141,34 +139,6 @@ async fn test_commit_rewind(mut storage: StorageProcessor<'_>) -> QueryResult<() Ok(()) } -/// Creates an unique new root hash for the block based on its number. -fn root_hash_for_block(block_number: BlockNumber) -> Fr { - Fr::from_str(&block_number.to_string()).unwrap() -} - -/// Creates an unique ethereum operation hash based on its number. -fn ethereum_tx_hash(ethereum_op_id: i64) -> H256 { - H256::from_low_u64_ne(ethereum_op_id as u64) -} - -/// Creates an operation with an unique hash. -fn get_unique_operation(block_number: BlockNumber, action: Action) -> Operation { - Operation { - id: None, - action, - block: Block::new( - block_number, - root_hash_for_block(block_number), - 0, - Vec::new(), - (0, 0), - 100, - 1_000_000.into(), - 1_500_000.into(), - ), - } -} - /// Checks that `find_block_by_height_or_hash` method allows /// to load the block details by either its height, hash of the included /// transaction, or the root hash of the block. @@ -257,7 +227,11 @@ async fn find_block_by_height_or_hash(mut storage: StorageProcessor<'_>) -> Quer // Store the operation in the block schema. 
let operation = BlockSchema(&mut storage) - .execute_operation(get_unique_operation(block_number, Action::Commit)) + .execute_operation(gen_unique_operation( + block_number, + Action::Commit, + BLOCK_SIZE_CHUNKS, + )) .await?; StateSchema(&mut storage) .commit_state_update(block_number, &updates, 0) @@ -266,7 +240,7 @@ async fn find_block_by_height_or_hash(mut storage: StorageProcessor<'_>) -> Quer // Store & confirm the operation in the ethereum schema, as it's used for obtaining // commit/verify hashes. let ethereum_op_id = operation.id.unwrap() as i64; - let eth_tx_hash = ethereum_tx_hash(ethereum_op_id); + let eth_tx_hash = dummy_ethereum_tx_hash(ethereum_op_id); let response = EthereumSchema(&mut storage) .save_new_eth_tx( OperationType::Commit, @@ -295,16 +269,17 @@ async fn find_block_by_height_or_hash(mut storage: StorageProcessor<'_>) -> Quer .store_proof(block_number, &Default::default()) .await?; let verify_operation = BlockSchema(&mut storage) - .execute_operation(get_unique_operation( + .execute_operation(gen_unique_operation( block_number, Action::Verify { proof: Default::default(), }, + BLOCK_SIZE_CHUNKS, )) .await?; let ethereum_op_id = verify_operation.id.unwrap() as i64; - let eth_tx_hash = ethereum_tx_hash(ethereum_op_id); + let eth_tx_hash = dummy_ethereum_tx_hash(ethereum_op_id); // Do not add an ethereum confirmation for the last operation. if block_number != n_verified { @@ -400,7 +375,11 @@ async fn block_range(mut storage: StorageProcessor<'_>) -> QueryResult<()> { // Store the operation in the block schema. 
let operation = BlockSchema(&mut storage) - .execute_operation(get_unique_operation(block_number, Action::Commit)) + .execute_operation(gen_unique_operation( + block_number, + Action::Commit, + BLOCK_SIZE_CHUNKS, + )) .await?; StateSchema(&mut storage) .commit_state_update(block_number, &updates, 0) @@ -409,7 +388,7 @@ async fn block_range(mut storage: StorageProcessor<'_>) -> QueryResult<()> { // Store & confirm the operation in the ethereum schema, as it's used for obtaining // commit/verify hashes. let ethereum_op_id = operation.id.unwrap() as i64; - let eth_tx_hash = ethereum_tx_hash(ethereum_op_id); + let eth_tx_hash = dummy_ethereum_tx_hash(ethereum_op_id); let response = EthereumSchema(&mut storage) .save_new_eth_tx( OperationType::Commit, @@ -432,15 +411,16 @@ async fn block_range(mut storage: StorageProcessor<'_>) -> QueryResult<()> { .store_proof(block_number, &Default::default()) .await?; let operation = BlockSchema(&mut storage) - .execute_operation(get_unique_operation( + .execute_operation(gen_unique_operation( block_number, Action::Verify { proof: Default::default(), }, + BLOCK_SIZE_CHUNKS, )) .await?; let ethereum_op_id = operation.id.unwrap() as i64; - let eth_tx_hash = ethereum_tx_hash(ethereum_op_id); + let eth_tx_hash = dummy_ethereum_tx_hash(ethereum_op_id); let response = EthereumSchema(&mut storage) .save_new_eth_tx( OperationType::Verify, @@ -500,7 +480,11 @@ async fn unconfirmed_transaction(mut storage: StorageProcessor<'_>) -> QueryResu // Store the operation in the block schema. 
let operation = BlockSchema(&mut storage) - .execute_operation(get_unique_operation(block_number, Action::Commit)) + .execute_operation(gen_unique_operation( + block_number, + Action::Commit, + BLOCK_SIZE_CHUNKS, + )) .await?; StateSchema(&mut storage) .commit_state_update(block_number, &updates, 0) @@ -509,7 +493,7 @@ async fn unconfirmed_transaction(mut storage: StorageProcessor<'_>) -> QueryResu // Store & confirm the operation in the ethereum schema, as it's used for obtaining // commit/verify hashes. let ethereum_op_id = operation.id.unwrap() as i64; - let eth_tx_hash = ethereum_tx_hash(ethereum_op_id); + let eth_tx_hash = dummy_ethereum_tx_hash(ethereum_op_id); let response = EthereumSchema(&mut storage) .save_new_eth_tx( OperationType::Commit, @@ -535,15 +519,16 @@ async fn unconfirmed_transaction(mut storage: StorageProcessor<'_>) -> QueryResu .store_proof(block_number, &Default::default()) .await?; let operation = BlockSchema(&mut storage) - .execute_operation(get_unique_operation( + .execute_operation(gen_unique_operation( block_number, Action::Verify { proof: Default::default(), }, + BLOCK_SIZE_CHUNKS, )) .await?; let ethereum_op_id = operation.id.unwrap() as i64; - let eth_tx_hash = ethereum_tx_hash(ethereum_op_id); + let eth_tx_hash = dummy_ethereum_tx_hash(ethereum_op_id); let response = EthereumSchema(&mut storage) .save_new_eth_tx( OperationType::Verify, diff --git a/core/lib/storage/src/tests/chain/utils.rs b/core/lib/storage/src/tests/chain/utils.rs index 72b392fe81..4295b30a54 100644 --- a/core/lib/storage/src/tests/chain/utils.rs +++ b/core/lib/storage/src/tests/chain/utils.rs @@ -1,116 +1,5 @@ -// External imports - -use parity_crypto::publickey::{Generator, Random}; -use zksync_basic_types::Address; -// Workspace imports -use num::BigUint; -use std::ops::Deref; -use zksync_crypto::rand::Rng; -use zksync_crypto::Fr; -use zksync_types::tx::{EthSignData, PackedEthSignature, TxEthSignature}; -use zksync_types::{ - Action, Operation, - { - 
block::{Block, ExecutedOperations}, - AccountUpdate, BlockNumber, PubKeyHash, - }, +// Reexports for compatibility with the existing code. +pub use crate::test_data::{ + gen_acc_random_updates as acc_create_random_updates, gen_eth_sing_data as get_eth_sign_data, + gen_operation as get_operation, gen_operation_with_txs as get_operation_with_txs, }; -// Local imports - -pub fn acc_create_random_updates( - rng: &mut R, -) -> impl Iterator { - let id: u32 = rng.gen(); - let balance = u128::from(rng.gen::()); - let nonce: u32 = rng.gen(); - let pub_key_hash = PubKeyHash { data: rng.gen() }; - let address: Address = rng.gen::<[u8; 20]>().into(); - - let mut a = zksync_types::account::Account::default_with_address(&address); - let old_nonce = nonce; - a.nonce = old_nonce + 2; - a.pub_key_hash = pub_key_hash; - - let old_balance = a.get_balance(0); - a.set_balance(0, BigUint::from(balance)); - let new_balance = a.get_balance(0); - vec![ - ( - id, - AccountUpdate::Create { - nonce: old_nonce, - address: a.address, - }, - ), - ( - id, - AccountUpdate::ChangePubKeyHash { - old_nonce, - old_pub_key_hash: PubKeyHash::default(), - new_nonce: old_nonce + 1, - new_pub_key_hash: a.pub_key_hash, - }, - ), - ( - id, - AccountUpdate::UpdateBalance { - old_nonce: old_nonce + 1, - new_nonce: old_nonce + 2, - balance_update: (0, old_balance, new_balance), - }, - ), - ] - .into_iter() -} - -pub fn get_operation(block_number: BlockNumber, action: Action, block_size: usize) -> Operation { - Operation { - id: None, - action, - block: Block::new( - block_number, - Fr::default(), - 0, - Vec::new(), - (0, 0), - block_size, - 1_000_000.into(), - 1_500_000.into(), - ), - } -} - -pub fn get_operation_with_txs( - block_number: BlockNumber, - action: Action, - block_size: usize, - txs: Vec, -) -> Operation { - Operation { - id: None, - action, - block: Block::new( - block_number, - Fr::default(), - 0, - txs, - (0, 0), - block_size, - 1_000_000.into(), - 1_500_000.into(), - ), - } -} - -/// Generates 
EthSignData for testing (not a valid signature) -pub fn get_eth_sign_data(message: String) -> EthSignData { - let keypair = Random.generate(); - let private_key = keypair.secret(); - - let signature = PackedEthSignature::sign(private_key.deref(), &message.as_bytes()).unwrap(); - - EthSignData { - message, - signature: TxEthSignature::EthereumSignature(signature), - } -} diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 97dafb7f3c..5c74d80f54 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -43,6 +43,7 @@ pub mod ethereum; pub mod gas_counter; pub mod helpers; pub mod mempool; +pub mod network; pub mod operations; pub mod priority_ops; pub mod tokens; @@ -71,7 +72,7 @@ pub type AccountUpdates = Vec<(u32, AccountUpdate)>; pub type AccountTree = SparseMerkleTree>; use crate::block::Block; -use zksync_crypto::{ +pub use zksync_crypto::{ merkle_tree::{RescueHasher, SparseMerkleTree}, proof::EncodedProofPlonk, Engine, Fr, diff --git a/sdk/zksync-rs/src/types/network.rs b/core/lib/types/src/network.rs similarity index 71% rename from sdk/zksync-rs/src/types/network.rs rename to core/lib/types/src/network.rs index 3e59e2502b..315eb97852 100644 --- a/sdk/zksync-rs/src/types/network.rs +++ b/core/lib/types/src/network.rs @@ -1,14 +1,16 @@ -//! //! The network where the zkSync resides. //! -use std::fmt; -use std::str::FromStr; +// Built-in uses +use std::{fmt, str::FromStr}; -use serde::Deserialize; -use serde::Serialize; +// External uses +use serde::{Deserialize, Serialize}; + +// Workspace uses + +// Local uses -/// /// Network to be used for a zkSync client. /// #[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq)] @@ -51,3 +53,16 @@ impl fmt::Display for Network { } } } + +impl Network { + /// Returns the network chain ID on the Ethereum side. 
+ pub fn chain_id(self) -> u8 { + match self { + Network::Mainnet => 1, + Network::Ropsten => 3, + Network::Rinkeby => 4, + Network::Localhost => 9, + Network::Unknown => panic!("Unknown chain ID"), + } + } +} diff --git a/core/lib/types/src/tx/primitives/tx_hash.rs b/core/lib/types/src/tx/primitives/tx_hash.rs index 1cddb835ad..1aacac18ea 100644 --- a/core/lib/types/src/tx/primitives/tx_hash.rs +++ b/core/lib/types/src/tx/primitives/tx_hash.rs @@ -8,6 +8,22 @@ pub struct TxHash { pub(crate) data: [u8; 32], } +impl TxHash { + /// Reads a transaction hash from its byte sequence representation. + /// + /// Returns none if the slice length does not match with hash length. + pub fn from_slice(slice: &[u8]) -> Option { + let mut out = TxHash { data: [0_u8; 32] }; + + if slice.len() != out.data.len() { + None + } else { + out.data.copy_from_slice(slice); + Some(out) + } + } +} + impl AsRef<[u8]> for TxHash { fn as_ref(&self) -> &[u8] { &self.data diff --git a/core/lib/types/src/tx/zksync_tx.rs b/core/lib/types/src/tx/zksync_tx.rs index 6c947c1e42..1cda406cc7 100644 --- a/core/lib/types/src/tx/zksync_tx.rs +++ b/core/lib/types/src/tx/zksync_tx.rs @@ -14,7 +14,7 @@ use zksync_basic_types::Address; #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct EthSignData { pub signature: TxEthSignature, - pub message: String, + pub message: Vec, } #[derive(Debug, Clone, Serialize, Deserialize)] diff --git a/docker/prover/Dockerfile b/docker/prover/Dockerfile index a674d57efc..918abe4a0b 100644 --- a/docker/prover/Dockerfile +++ b/docker/prover/Dockerfile @@ -10,9 +10,7 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \ cargo build --release FROM debian:buster-slim -RUN curl -sL https://deb.nodesource.com/setup_14.x | bash - -RUN apt-get update && apt-get install -y axel ca-certificates nodejs npm \ - && npm install -g yarn && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y axel ca-certificates && rm -rf /var/lib/apt/lists/* # Docs of 
env variables used for this image # to test this locally just run # docker build -t tmp_prover -f =(f envsubst < ./docker/prover/Dockerfile) .; docker run --rm tmp_prover:latest @@ -38,7 +36,5 @@ COPY --from=builder /usr/src/zksync/target/release/dummy_prover /bin/ COPY docker/prover/prover-entry.sh /bin/ COPY keys/packed /keys/packed COPY contracts/build /contracts/build -COPY infrastructure/zk /infrastructure/zk -COPY bin/zk /bin/ CMD ["prover-entry.sh"] diff --git a/docker/prover/prover-entry.sh b/docker/prover/prover-entry.sh index 07020de5b0..cf0a9c92ec 100755 --- a/docker/prover/prover-entry.sh +++ b/docker/prover/prover-entry.sh @@ -1,5 +1,7 @@ #!/bin/bash +set -e + export ZKSYNC_HOME="/" PROVER_NAME=`hostname` @@ -33,17 +35,27 @@ function get_required_plonk_setup_powers() { # we download only keys used in node (defined by $BLOCK_CHUNK_SIZES) REQUIRED_SETUP_POWS=`get_required_plonk_setup_powers` -# install zk -echo installing zk -cd /infrastructure/zk && yarn install && yarn build if [ "$PROVER_DOWNLOAD_SETUP" == "false" ]; then echo Downloading setup powers $REQUIRED_SETUP_POWS - /bin/zk run plonk-setup $REQUIRED_SETUP_POWS + + SETUP_DO_SPACE_DIR=https://universal-setup.ams3.digitaloceanspaces.com + mkdir -p keys/setup && pushd keys/setup + + for i in ${REQUIRED_SETUP_POWS//,/ }; do + axel -c $SETUP_DO_SPACE_DIR/setup_2%5E$i.key || true # don't download file if it is already there + sleep 1 # to not receive "503 Slow Down" + done + + popd + echo Setup is downloaded fi -/bin/zk run verify-keys unpack +VERIFY_KEYS_TARBAL="verify-keys-`basename $KEY_DIR`-account-"$ACCOUNT_TREE_DEPTH"_-balance-$BALANCE_TREE_DEPTH.tar.gz" -echo key download complete, starting prover +# checks if keys are present and if so, unpacks them +[ -f keys/packed/$VERIFY_KEYS_TARBAL ] || (echo Keys file $VERIFY_KEYS_TARBAL not found && exit 1) +tar xf keys/packed/$VERIFY_KEYS_TARBAL +echo Keys unpacked, starting prover exec plonk_step_by_step_prover "$PROVER_NAME" 2>&1 diff --git 
a/infrastructure/zk/src/test/test.ts b/infrastructure/zk/src/test/test.ts index 5bbf287d03..943f226184 100644 --- a/infrastructure/zk/src/test/test.ts +++ b/infrastructure/zk/src/test/test.ts @@ -5,16 +5,38 @@ import * as contract from '../contract'; import * as integration from './integration'; export { integration }; -export async function db(reset: boolean) { +async function runOnTestDb(reset: boolean, dir: string, command: string) { const databaseUrl = process.env.DATABASE_URL as string; process.env.DATABASE_URL = databaseUrl.replace(/plasma/g, 'plasma_test'); process.chdir('core/lib/storage'); if (reset) { + console.info("Performing database reset...") await utils.exec('diesel database reset'); await utils.exec('diesel migration run'); } - await utils.spawn('cargo test --release -p zksync_storage --features db_test -- --nocapture'); process.chdir(process.env.ZKSYNC_HOME as string); + + process.chdir(dir); + await utils.spawn(command); + process.chdir(process.env.ZKSYNC_HOME as string); +} + +export async function db(reset: boolean, ...args: string[]) { + await runOnTestDb( + reset, + 'core/lib/storage', + `cargo test --release -p zksync_storage --features db_test -- --nocapture + ${args.join(' ')}` + ); +} + +export async function rustApi(reset: boolean, ...args: string[]) { + await runOnTestDb( + reset, + 'core/bin/zksync_api', + `cargo test --release -p zksync_api --features api_test -- --nocapture + ${args.join(' ')}` + ); } export async function contracts() { @@ -41,6 +63,7 @@ export async function js() { export async function rust() { await utils.spawn('cargo test --release'); await db(true); + await rustApi(true) await prover(); const { stdout: threads } = await utils.exec('nproc'); await circuit(parseInt(threads)); @@ -56,11 +79,22 @@ command.command('rust').description('run unit-tests for all rust binaries and li command .command('db') .description('run unit-tests for the database') - .option('--reset') - .action(async (cmd: Command) => { - await 
db(cmd.reset); + .option('--no-reset', "do not reset the database before test starting") + .allowUnknownOption() + .action(async (cmd: Command, options: string[] | undefined) => { + await db(!cmd.reset, ...options || []); }); +command + .command('rust-api') + .description('run unit-tests for the REST API') + .option('--no-reset', "do not reset the database before test starting") + .allowUnknownOption() + .action(async (cmd: Command, options: string[] | undefined) => { + await rustApi(cmd.reset, ...options || []); + }); + + command .command('circuit [threads] [test_name] [options...]') .description('run unit-tests for the circuit') diff --git a/sdk/zksync-crypto/build.sh b/sdk/zksync-crypto/build.sh index 2470dca786..aaa3da544c 100755 --- a/sdk/zksync-crypto/build.sh +++ b/sdk/zksync-crypto/build.sh @@ -9,9 +9,6 @@ wasm-pack build --release --target=bundler --out-name=zksync-crypto-bundler --ou # pack for browser wasm-pack build --release --target=web --out-name=zksync-crypto-web --out-dir=dist -cat >> dist/zksync-crypto-web.js < { + jsCode = jsCode.replace(new RegExp(str, 'g'), '// ' + str); +}); + +jsCode += ` +const base64WasmCode = \`${wasmData.toString('base64')}\`; + +function base64ToArrayBuffer(base64) { + const binaryString = window.atob(base64); + const length = binaryString.length; + const bytes = new Uint8Array(length); + + for (let i = 0; i < length; i++) { + bytes[i] = binaryString.charCodeAt(i); + } + return bytes.buffer; +} + +const wasmBytes = base64ToArrayBuffer(base64WasmCode); + +const wasmResponseInit = { + "status" : 200 , + "statusText" : "ok.", + headers: { + 'Content-Type': 'application/wasm', + 'Content-Length': wasmBytes.length + } +}; + +export async function loadZkSyncCrypto(wasmFileUrl) { + if (!wasmFileUrl) { + const wasmResponse = new Response(wasmBytes, wasmResponseInit); + await init(wasmResponse); + } else { + await init(DefaultZksyncCryptoWasmURL); + } +} +`; + +fs.writeFileSync(jsFile, jsCode); diff --git 
a/sdk/zksync-rs/src/credentials.rs b/sdk/zksync-rs/src/credentials.rs index e445e16a1b..41368eda45 100644 --- a/sdk/zksync-rs/src/credentials.rs +++ b/sdk/zksync-rs/src/credentials.rs @@ -1,9 +1,9 @@ -use crate::{error::ClientError, types::network::Network, utils::private_key_from_seed}; +use crate::{error::ClientError, utils::private_key_from_seed}; use web3::types::{Address, H256}; use zksync_crypto::PrivateKey; use zksync_eth_signer::{EthereumSigner, PrivateKeySigner}; -use zksync_types::tx::TxEthSignature; +use zksync_types::{network::Network, tx::TxEthSignature}; pub struct WalletCredentials { pub(crate) eth_signer: Option, diff --git a/sdk/zksync-rs/src/ethereum/mod.rs b/sdk/zksync-rs/src/ethereum/mod.rs index 2cffc0c719..ed5e157348 100644 --- a/sdk/zksync-rs/src/ethereum/mod.rs +++ b/sdk/zksync-rs/src/ethereum/mod.rs @@ -13,8 +13,7 @@ use zksync_eth_signer::EthereumSigner; use zksync_types::{AccountId, PriorityOp, TokenLike}; use crate::{ - error::ClientError, provider::Provider, tokens_cache::TokensCache, types::network::Network, - utils::u256_to_biguint, + error::ClientError, provider::Provider, tokens_cache::TokensCache, utils::u256_to_biguint, }; const IERC20_INTERFACE: &str = include_str!("abi/IERC20.json"); @@ -39,18 +38,6 @@ pub fn ierc20_contract() -> ethabi::Contract { load_contract(IERC20_INTERFACE) } -impl Network { - pub fn chain_id(self) -> u8 { - match self { - Network::Mainnet => 1, - Network::Ropsten => 3, - Network::Rinkeby => 4, - Network::Localhost => 9, - Network::Unknown => panic!("Attempt to connect to an unknown network"), - } - } -} - /// `EthereumProvider` gains access to on-chain operations, such as deposits and full exits. /// Methods to interact with Ethereum return corresponding Ethereum transaction hash. 
/// In order to monitor transaction execution, an Ethereum node `web3` API is exposed diff --git a/sdk/zksync-rs/src/lib.rs b/sdk/zksync-rs/src/lib.rs index 8ae420dfa8..949b44dba3 100644 --- a/sdk/zksync-rs/src/lib.rs +++ b/sdk/zksync-rs/src/lib.rs @@ -10,9 +10,9 @@ pub mod utils; pub mod wallet; pub use crate::{ - credentials::WalletCredentials, ethereum::EthereumProvider, provider::Provider, - types::network::Network, wallet::Wallet, + credentials::WalletCredentials, ethereum::EthereumProvider, provider::Provider, wallet::Wallet, }; +pub use zksync_types::network::Network; pub use web3; pub use zksync_types; diff --git a/sdk/zksync-rs/src/provider.rs b/sdk/zksync-rs/src/provider.rs index 792bc4e0ff..86a7d40e84 100644 --- a/sdk/zksync-rs/src/provider.rs +++ b/sdk/zksync-rs/src/provider.rs @@ -9,13 +9,14 @@ use jsonrpc_core::{types::response::Output, ErrorCode}; // Workspace uses use zksync_types::{ + network::Network, tx::{PackedEthSignature, TxHash, ZkSyncTx}, Address, TokenLike, TxFeeTypes, }; // Local uses use self::messages::JsonRpcRequest; -use crate::{error::ClientError, types::network::Network, types::*}; +use crate::{error::ClientError, types::*}; /// Returns a corresponding address for a provided network name. 
pub fn get_rpc_addr(network: Network) -> &'static str { diff --git a/sdk/zksync-rs/src/types/mod.rs b/sdk/zksync-rs/src/types/mod.rs index 4c6f4ce032..d96eccdfd5 100644 --- a/sdk/zksync-rs/src/types/mod.rs +++ b/sdk/zksync-rs/src/types/mod.rs @@ -4,8 +4,6 @@ use std::collections::HashMap; use zksync_types::{AccountId, Address, Nonce, PubKeyHash, Token}; use zksync_utils::{BigUintSerdeAsRadix10Str, BigUintSerdeWrapper}; -pub mod network; - pub type Tokens = HashMap; #[derive(Debug, Clone, Serialize, Deserialize, Default)] diff --git a/sdk/zksync.js/package.json b/sdk/zksync.js/package.json index d549c58ccb..094eb3a70e 100644 --- a/sdk/zksync.js/package.json +++ b/sdk/zksync.js/package.json @@ -1,6 +1,6 @@ { "name": "zksync", - "version": "0.7.5", + "version": "0.8.1", "license": "MIT", "main": "build/index.js", "types": "build/index.d.ts", @@ -16,7 +16,7 @@ "axios": "^0.21.0", "websocket": "^1.0.30", "websocket-as-promised": "^1.1.0", - "zksync-crypto": "^0.3.1" + "zksync-crypto": "^0.4.1" }, "peerDependencies": { "@ethersproject/logger": "^5.0.0", diff --git a/sdk/zksync.js/rollup.config.js b/sdk/zksync.js/rollup.config.js index 7a78a91ca1..35e2ff3f4c 100644 --- a/sdk/zksync.js/rollup.config.js +++ b/sdk/zksync.js/rollup.config.js @@ -1,7 +1,6 @@ import resolve from '@rollup/plugin-node-resolve'; import commonjs from '@rollup/plugin-commonjs'; import json from '@rollup/plugin-json'; -import copy from 'rollup-plugin-copy'; import {terser} from "rollup-plugin-terser"; function resolveWithZksyncCryptoReplace(options) { @@ -35,10 +34,6 @@ export default [ }), commonjs(), json(), - copy({ - targets: [{src: 'node_modules/zksync-crypto/dist/zksync-crypto-web_bg.wasm', dest: 'dist/'}], - verbose: true - }), terser(), ] }, diff --git a/sdk/zksync.js/src/crypto.ts b/sdk/zksync.js/src/crypto.ts index 4ce9ff0c09..c782b822a5 100644 --- a/sdk/zksync.js/src/crypto.ts +++ b/sdk/zksync.js/src/crypto.ts @@ -4,9 +4,15 @@ import { private_key_to_pubkey_hash, sign_musig } from 
'zksync-crypto'; import * as zks from 'zksync-crypto'; import { utils } from 'ethers'; -export { privateKeyFromSeed } from 'zksync-crypto'; +export async function privateKeyFromSeed(seed: Uint8Array): Promise { + await loadZkSyncCrypto(); + + return zks.privateKeyFromSeed(seed); +} + +export async function signTransactionBytes(privKey: Uint8Array, bytes: Uint8Array): Promise { + await loadZkSyncCrypto(); -export function signTransactionBytes(privKey: Uint8Array, bytes: Uint8Array): Signature { const signaturePacked = sign_musig(privKey, bytes); const pubKey = utils.hexlify(signaturePacked.slice(0, 32)).substr(2); const signature = utils.hexlify(signaturePacked.slice(32)).substr(2); @@ -16,20 +22,24 @@ export function signTransactionBytes(privKey: Uint8Array, bytes: Uint8Array): Si }; } -export function privateKeyToPubKeyHash(privateKey: Uint8Array): string { +export async function privateKeyToPubKeyHash(privateKey: Uint8Array): Promise { + await loadZkSyncCrypto(); + return `sync:${utils.hexlify(private_key_to_pubkey_hash(privateKey)).substr(2)}`; } let zksyncCryptoLoaded = false; - export async function loadZkSyncCrypto(wasmFileUrl?: string) { + if(zksyncCryptoLoaded) { + return; + } // Only runs in the browser - if ((zks as any).default) { - // @ts-ignore - const url = wasmFileUrl ? wasmFileUrl : zks.DefaultZksyncCryptoWasmURL; - if (!zksyncCryptoLoaded) { - await (zks as any).default(url); - zksyncCryptoLoaded = true; - } + if ((zks as any).loadZkSyncCrypto) { + // It is ok if wasmFileUrl is not specified. + // Actually, typically it should not be specified, + // since the content of the `.wasm` file is read + // from the `.js` file itself. 
+ await (zks as any).loadZkSyncCrypto(wasmFileUrl); + zksyncCryptoLoaded = true; } } diff --git a/sdk/zksync.js/src/signer.ts b/sdk/zksync.js/src/signer.ts index 5efbc046ba..45804499b7 100644 --- a/sdk/zksync.js/src/signer.ts +++ b/sdk/zksync.js/src/signer.ts @@ -21,11 +21,11 @@ export class Signer { this.privateKey = privKey; } - pubKeyHash(): PubKeyHash { - return privateKeyToPubKeyHash(this.privateKey); + async pubKeyHash(): Promise { + return await privateKeyToPubKeyHash(this.privateKey); } - signSyncTransfer(transfer: { + async signSyncTransfer(transfer: { accountId: number; from: Address; to: Address; @@ -33,7 +33,7 @@ export class Signer { amount: BigNumberish; fee: BigNumberish; nonce: number; - }): Transfer { + }): Promise { const type = new Uint8Array([5]); // tx type const accountId = serializeAccountId(transfer.accountId); const from = serializeAddress(transfer.from); @@ -44,7 +44,7 @@ export class Signer { const nonce = serializeNonce(transfer.nonce); const msgBytes = ethers.utils.concat([type, accountId, from, to, token, amount, fee, nonce]); - const signature = signTransactionBytes(this.privateKey, msgBytes); + const signature = await signTransactionBytes(this.privateKey, msgBytes); return { type: 'Transfer', @@ -59,7 +59,7 @@ export class Signer { }; } - signSyncWithdraw(withdraw: { + async signSyncWithdraw(withdraw: { accountId: number; from: Address; ethAddress: string; @@ -67,7 +67,7 @@ export class Signer { amount: BigNumberish; fee: BigNumberish; nonce: number; - }): Withdraw { + }): Promise { const typeBytes = new Uint8Array([3]); const accountId = serializeAccountId(withdraw.accountId); const accountBytes = serializeAddress(withdraw.from); @@ -86,7 +86,8 @@ export class Signer { feeBytes, nonceBytes ]); - const signature = signTransactionBytes(this.privateKey, msgBytes); + const signature = await signTransactionBytes(this.privateKey, msgBytes); + return { type: 'Withdraw', accountId: withdraw.accountId, @@ -100,13 +101,13 @@ export class 
Signer { }; } - signSyncForcedExit(forcedExit: { + async signSyncForcedExit(forcedExit: { initiatorAccountId: number; target: Address; tokenId: number; fee: BigNumberish; nonce: number; - }): ForcedExit { + }): Promise { const typeBytes = new Uint8Array([8]); const initiatorAccountIdBytes = serializeAccountId(forcedExit.initiatorAccountId); const targetBytes = serializeAddress(forcedExit.target); @@ -121,7 +122,7 @@ export class Signer { feeBytes, nonceBytes ]); - const signature = signTransactionBytes(this.privateKey, msgBytes); + const signature = await signTransactionBytes(this.privateKey, msgBytes); return { type: 'ForcedExit', initiatorAccountId: forcedExit.initiatorAccountId, @@ -133,14 +134,14 @@ export class Signer { }; } - signSyncChangePubKey(changePubKey: { + async signSyncChangePubKey(changePubKey: { accountId: number; account: Address; newPkHash: PubKeyHash; feeTokenId: number; fee: BigNumberish; nonce: number; - }): ChangePubKey { + }): Promise { const typeBytes = new Uint8Array([7]); // Tx type (1 byte) const accountIdBytes = serializeAccountId(changePubKey.accountId); const accountBytes = serializeAddress(changePubKey.account); @@ -157,7 +158,7 @@ export class Signer { feeBytes, nonceBytes ]); - const signature = signTransactionBytes(this.privateKey, msgBytes); + const signature = await signTransactionBytes(this.privateKey, msgBytes); return { type: 'ChangePubKey', accountId: changePubKey.accountId, @@ -175,8 +176,8 @@ export class Signer { return new Signer(pk); } - static fromSeed(seed: Uint8Array): Signer { - return new Signer(privateKeyFromSeed(seed)); + static async fromSeed(seed: Uint8Array): Promise { + return new Signer(await privateKeyFromSeed(seed)); } static async fromETHSignature( @@ -199,7 +200,7 @@ export class Signer { const address = await ethSigner.getAddress(); const ethSignatureType = await getEthSignatureType(ethSigner.provider, message, signature, address); const seed = ethers.utils.arrayify(signature); - const signer = 
Signer.fromSeed(seed); + const signer = await Signer.fromSeed(seed); return { signer, ethSignatureType }; } } diff --git a/sdk/zksync.js/src/utils.ts b/sdk/zksync.js/src/utils.ts index 8bfd931985..eb1e38bfc3 100644 --- a/sdk/zksync.js/src/utils.ts +++ b/sdk/zksync.js/src/utils.ts @@ -326,9 +326,13 @@ export async function verifyERC1271Signature( signerOrProvider: ethers.Signer | ethers.providers.Provider ): Promise { const EIP1271_SUCCESS_VALUE = '0x1626ba7e'; + + // sign_message = keccak256("\x19Ethereum Signed Message:\n32" + keccak256(message)) const hash = utils.keccak256(message); + const sign_message = utils.hashMessage(hash); + const eip1271 = new ethers.Contract(address, IEIP1271_INTERFACE, signerOrProvider); - const eipRetVal = await eip1271.isValidSignature(utils.hexlify(hash), signature); + const eipRetVal = await eip1271.isValidSignature(sign_message, signature); return eipRetVal === EIP1271_SUCCESS_VALUE; } diff --git a/sdk/zksync.js/src/wallet.ts b/sdk/zksync.js/src/wallet.ts index 4e4d16433d..d3f96bcfd0 100644 --- a/sdk/zksync.js/src/wallet.ts +++ b/sdk/zksync.js/src/wallet.ts @@ -86,7 +86,7 @@ export class Wallet { return wallet; } - async getEthMessageSignature(message: string): Promise { + async getEthMessageSignature(message: ethers.utils.BytesLike): Promise { if (this.ethSignerType == null) { throw new Error('ethSignerType is unknown'); } @@ -176,7 +176,7 @@ export class Wallet { nonce: forcedExit.nonce }; - const signedForcedExitTransaction = this.signer.signSyncForcedExit(transactionData); + const signedForcedExitTransaction = await this.signer.signSyncForcedExit(transactionData); return { tx: signedForcedExitTransaction @@ -251,8 +251,9 @@ export class Wallet { bytes = ethers.utils.concat([bytes, serializeTransfer(tx)]); batch.push({ tx, signature: null }); } - - const ethSignature = await this.getEthMessageSignature(ethers.utils.keccak256(bytes).slice(2)); + const hash = ethers.utils.keccak256(bytes).slice(2); + const message = 
Uint8Array.from(Buffer.from(hash, 'hex')); + const ethSignature = await this.getEthMessageSignature(message); const transactionHashes = await this.provider.submitTxsBatch(batch, ethSignature); return transactionHashes.map((txHash, idx) => new Transaction(batch[idx], txHash, this.provider)); @@ -310,7 +311,7 @@ export class Wallet { const txMessageEthSignature = await this.getEthMessageSignature(humanReadableTxInfo); - const signedWithdrawTransaction = this.signer.signSyncWithdraw(transactionData); + const signedWithdrawTransaction = await this.signer.signSyncWithdraw(transactionData); return { tx: signedWithdrawTransaction, @@ -345,7 +346,7 @@ export class Wallet { throw new Error('ZKSync signer is required for current pubkey calculation.'); } const currentPubKeyHash = await this.getCurrentPubKeyHash(); - const signerPubKeyHash = this.signer.pubKeyHash(); + const signerPubKeyHash = await this.signer.pubKeyHash(); return currentPubKeyHash === signerPubKeyHash; } @@ -360,7 +361,7 @@ export class Wallet { } const feeTokenId = await this.provider.tokenSet.resolveTokenId(changePubKey.feeToken); - const newPubKeyHash = this.signer.pubKeyHash(); + const newPubKeyHash = await this.signer.pubKeyHash(); await this.setRequiredAccountIdFromServer('Set Signing Key'); @@ -369,10 +370,10 @@ export class Wallet { ? 
null : (await this.getEthMessageSignature(changePubKeyMessage)).signature; - const changePubKeyTx: ChangePubKey = this.signer.signSyncChangePubKey({ + const changePubKeyTx: ChangePubKey = await this.signer.signSyncChangePubKey({ accountId: this.accountId, account: this.address(), - newPkHash: this.signer.pubKeyHash(), + newPkHash: await this.signer.pubKeyHash(), nonce: changePubKey.nonce, feeTokenId, fee: BigNumber.from(changePubKey.fee).toString() @@ -445,7 +446,7 @@ export class Wallet { } const currentPubKeyHash = await this.getCurrentPubKeyHash(); - const newPubKeyHash = this.signer.pubKeyHash(); + const newPubKeyHash = await this.signer.pubKeyHash(); if (currentPubKeyHash === newPubKeyHash) { throw new Error('Current PubKeyHash is the same as new'); diff --git a/yarn.lock b/yarn.lock index 5f57f47e93..f8bdffa40c 100644 --- a/yarn.lock +++ b/yarn.lock @@ -13646,15 +13646,15 @@ yorkie@^2.0.0: normalize-path "^1.0.0" strip-indent "^2.0.0" -zksync-crypto@^0.3.1: - version "0.3.2" - resolved "https://registry.yarnpkg.com/zksync-crypto/-/zksync-crypto-0.3.2.tgz#2e073d1c42a09f65efe41119e3e065edff87fb95" - integrity sha512-fLNZ82wddyevNe9O6WEB0Ls3DnttkJH5s7E1tXXq9mGHMVSn/oMHFn35BD6ZRBhALdLexKbalWoJ89Ua93yzFg== +zksync-crypto@^0.4.1: + version "0.4.1" + resolved "https://registry.yarnpkg.com/zksync-crypto/-/zksync-crypto-0.4.1.tgz#1093f2dac2f6c126effa878f25fa44fa48f12246" + integrity sha512-y8GwqIKEbnJjnHKYBffnhvSmkxp43lLOApI2VuZ3kzIni+HFTlSsNDDSJAvXv10GRldMomv2MzTJq/LxZJgKmA== "zksync@link:sdk/zksync.js": - version "0.7.5" + version "0.8.1" dependencies: axios "^0.21.0" websocket "^1.0.30" websocket-as-promised "^1.1.0" - zksync-crypto "^0.3.1" + zksync-crypto "^0.4.1"