Commit
Merge branch 'dev' of github.com:matter-labs/zksync-dev
Deniallugo committed May 27, 2022
2 parents 28b7402 + 6fc4d09 commit b838853
Showing 9 changed files with 58 additions and 24 deletions.
18 changes: 18 additions & 0 deletions .github/workflows/ci.yml
@@ -18,6 +18,9 @@ jobs:
run: |
echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
echo $(pwd)/bin >> $GITHUB_PATH
+ ## Setup sccache GCS key (update docker-compose .env)
+ SCCACHE_TEMP=$(echo `mktemp -d`)/sa_key.json; echo $SCCACHE_CGS_KEY > $SCCACHE_TEMP
+ echo "HOST_GCS_KEY_PATH=$SCCACHE_TEMP" >> .env
- name: start-services
run: |
@@ -46,6 +49,9 @@ jobs:
run: |
echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
echo $(pwd)/bin >> $GITHUB_PATH
+ ## Setup sccache GCS key (update docker-compose .env)
+ SCCACHE_TEMP=$(echo `mktemp -d`)/sa_key.json; echo $SCCACHE_CGS_KEY > $SCCACHE_TEMP
+ echo "HOST_GCS_KEY_PATH=$SCCACHE_TEMP" >> .env
- name: start-services
run: |
@@ -88,6 +94,9 @@ jobs:
run: |
echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
echo $(pwd)/bin >> $GITHUB_PATH
+ ## Setup sccache GCS key (update docker-compose .env)
+ SCCACHE_TEMP=$(echo `mktemp -d`)/sa_key.json; echo $SCCACHE_CGS_KEY > $SCCACHE_TEMP
+ echo "HOST_GCS_KEY_PATH=$SCCACHE_TEMP" >> .env
- name: start-services
run: |
@@ -145,6 +154,9 @@ jobs:
run: |
echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
echo $(pwd)/bin >> $GITHUB_PATH
+ ## Setup sccache GCS key (update docker-compose .env)
+ SCCACHE_TEMP=$(echo `mktemp -d`)/sa_key.json; echo $SCCACHE_CGS_KEY > $SCCACHE_TEMP
+ echo "HOST_GCS_KEY_PATH=$SCCACHE_TEMP" >> .env
- name: start-services
run: |
@@ -173,6 +185,9 @@ jobs:
echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
echo $(pwd)/bin >> $GITHUB_PATH
echo PLUGIN_CONFIG=fast >> $GITHUB_ENV # use fast mode for geth image (instant tx commit)
+ ## Setup sccache GCS key (update docker-compose .env)
+ SCCACHE_TEMP=$(echo `mktemp -d`)/sa_key.json; echo $SCCACHE_CGS_KEY > $SCCACHE_TEMP
+ echo "HOST_GCS_KEY_PATH=$SCCACHE_TEMP" >> .env
- name: start-services
run: |
@@ -201,6 +216,9 @@ jobs:
run: |
echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
echo $(pwd)/bin >> $GITHUB_PATH
+ ## Setup sccache GCS key (update docker-compose .env)
+ SCCACHE_TEMP=$(echo `mktemp -d`)/sa_key.json; echo $SCCACHE_CGS_KEY > $SCCACHE_TEMP
+ echo "HOST_GCS_KEY_PATH=$SCCACHE_TEMP" >> .env
- name: start-services
run: |
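The six identical additions above all stage the same thing: the GCS service-account key is read from a secret, written to a throwaway file, and its path is exported through .env so the runner container can mount it (see the docker-compose-runner.yml change below). A rough Rust equivalent of those three shell lines, for illustration only — the env-var and file names mirror the workflow, while the temp-dir layout is simplified:

```rust
// Illustrative sketch of the added CI shell lines: dump the service-account key
// from an env var into a fresh file and record its path in `.env` for
// docker-compose. Unlike `mktemp -d`, the temp dir here is not unique per run.
use std::{env, fs, io::Write};

fn main() -> std::io::Result<()> {
    let key = env::var("SCCACHE_CGS_KEY").unwrap_or_default();
    let dir = env::temp_dir().join("sccache_key");
    fs::create_dir_all(&dir)?;
    let key_path = dir.join("sa_key.json");
    fs::write(&key_path, key)?; // the service-account JSON key
    // docker-compose-runner.yml picks this path up as HOST_GCS_KEY_PATH.
    let mut env_file = fs::OpenOptions::new().create(true).append(true).open(".env")?;
    writeln!(env_file, "HOST_GCS_KEY_PATH={}", key_path.display())?;
    Ok(())
}
```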
15 changes: 11 additions & 4 deletions core/bin/data_restore/src/data_restore_driver.rs
@@ -530,12 +530,17 @@ impl<T: Transport> DataRestoreDriver<T> {
// TODO (ZKS-722): either due to Ethereum node lag or unknown
// bug in the events state, we have to additionally filter out
// already processed rollup blocks.
+ let mut last_processed_block = self.tree_state.block_number;
for event in self
.events_state
.get_only_verified_committed_events()
.iter()
.filter(|bl| bl.block_num > self.tree_state.block_number)
{
+ // For some reason, we have a bug where the event state contains duplicate entries for blocks.
+ if last_processed_block >= event.block_num {
+     continue;
+ }
// We use an aggregated block in contracts, which means that several BlockEvents can share the same tx_hash,
// but for a correct restore we need to generate RollupBlocks from such a tx only once.
// These blocks go one after the other, and checking only the last transaction hash is safe.
@@ -557,10 +562,12 @@ impl<T: Transport> DataRestoreDriver<T> {
last_event_tx_hash = Some(event.transaction_hash);
}

- let rollup_block = last_tx_blocks
-     .remove(&event.block_num)
-     .expect("Block not found");
- blocks.push(rollup_block);
+ if let Some(rollup_block) = last_tx_blocks.remove(&event.block_num) {
+     blocks.push(rollup_block);
+     last_processed_block = event.block_num;
+ } else {
+     panic!("Block not found")
+ }
}

blocks
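A minimal sketch of the duplicate-block guard this hunk introduces, with a simplified stand-in for BlockEvent (only the block_num field is kept): events arrive ordered by block number, so remembering the last processed block is enough to drop the duplicates mentioned in the comment.

```rust
// Simplified model of the dedup guard; `BlockEvent` is a stand-in type.
struct BlockEvent {
    block_num: u32,
}

fn collect_new_blocks(events: &[BlockEvent], last_verified: u32) -> Vec<u32> {
    let mut last_processed_block = last_verified;
    let mut blocks = Vec::new();
    for event in events.iter().filter(|e| e.block_num > last_verified) {
        // The event state may contain duplicates; since blocks arrive in
        // order, anything at or below the last processed number is a repeat.
        if last_processed_block >= event.block_num {
            continue;
        }
        blocks.push(event.block_num);
        last_processed_block = event.block_num;
    }
    blocks
}

fn main() {
    let events = [
        BlockEvent { block_num: 5 },
        BlockEvent { block_num: 5 }, // duplicate entry
        BlockEvent { block_num: 6 },
    ];
    // The duplicate of block 5 is processed only once.
    assert_eq!(collect_new_blocks(&events, 4), vec![5, 6]);
}
```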
2 changes: 2 additions & 0 deletions core/bin/zksync_api/src/fee_ticker/constants.rs
@@ -15,9 +15,11 @@ pub(crate) const BASE_TRANSFER_TO_NEW_COST: u64 = VerifyCost::TRANSFER_TO_NEW_CO
+ CommitCost::TRANSFER_TO_NEW_COST
+ AMORTIZED_COST_PER_CHUNK * (TransferToNewOp::CHUNKS as u64);
pub(crate) const BASE_WITHDRAW_COST: u64 = VerifyCost::WITHDRAW_COST
+   + VerifyCost::PENDING_WITHDRAW_COST
+ CommitCost::WITHDRAW_COST
+ AMORTIZED_COST_PER_CHUNK * (WithdrawOp::CHUNKS as u64);
pub(crate) const BASE_WITHDRAW_NFT_COST: u64 = VerifyCost::WITHDRAW_NFT_COST
+   + VerifyCost::PENDING_WITHDRAW_NFT_COST
+ CommitCost::WITHDRAW_NFT_COST
+ AMORTIZED_COST_PER_CHUNK * (WithdrawNFTOp::CHUNKS as u64);
pub(crate) const BASE_OLD_CHANGE_PUBKEY_OFFCHAIN_COST: u64 =
18 changes: 10 additions & 8 deletions core/bin/zksync_core/src/state_keeper/mod.rs
@@ -308,17 +308,19 @@ impl ZkSyncStateKeeper {
// By giving these hashes to the mempool,
// we won't receive back transactions that we already executed in the current block.

+ // It's counterintuitive, but we want to exclude only successful operations: when we are
+ // under attack with rejected txs, the pending block becomes massive and contains a lot of
+ // rejected txs, so we would send tons of tx hashes to the database.
+ // Excluding successful operations is mandatory because re-executing them would fail on the
+ // next step, while re-executing rejected txs is safe: in the worst case they simply become
+ // successful. Keeping in mind that we regularly clean executed txs out of the mempool,
+ // the database cannot return tons of rejected txs.
+
let executed_txs = self
.pending_block
- .failed_txs
+ .success_operations
.iter()
- .map(|tx| tx.signed_tx.hash())
- .chain(
-     self.pending_block
-         .success_operations
-         .iter()
-         .filter_map(|op| op.get_executed_tx().map(|tx| tx.signed_tx.hash())),
- )
+ .filter_map(|op| op.get_executed_tx().map(|tx| tx.signed_tx.hash()))
.collect();

let mempool_req = MempoolBlocksRequest::GetBlock(GetBlockRequest {
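To make the comment concrete, here is a hedged sketch of the filtering change with stand-in types (TxHash and PendingBlock are simplifications, not the real definitions): only hashes of successfully executed txs go into the exclusion set handed to the mempool, so a flood of rejected txs can no longer bloat that set.

```rust
// Sketch of the new exclusion set; the types are simplified stand-ins.
use std::collections::HashSet;

type TxHash = u64;

struct PendingBlock {
    success_operations: Vec<TxHash>,
    failed_txs: Vec<TxHash>,
}

fn executed_tx_filter(pending: &PendingBlock) -> HashSet<TxHash> {
    // Before this commit the filter also chained in `failed_txs`, so an
    // attacker spamming rejected txs could make this set huge.
    pending.success_operations.iter().copied().collect()
}

fn main() {
    let pending = PendingBlock {
        success_operations: vec![1, 2],
        failed_txs: vec![3, 4, 5],
    };
    let filter = executed_tx_filter(&pending);
    // Rejected txs (3, 4, 5) may be handed back by the mempool; re-executing
    // them is safe, while re-executing 1 or 2 would fail, so those are excluded.
    assert!(filter.contains(&1) && !filter.contains(&3));
    println!("{} rejected txs may be returned", pending.failed_txs.len());
}
```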
16 changes: 9 additions & 7 deletions core/lib/mempool/src/transactions_handler.rs
@@ -119,13 +119,15 @@ impl MempoolTransactionsHandler {
TxAddError::DbError
})?;

- for op in &ops {
-     let labels = vec![
-         ("stage", "mempool".to_string()),
-         ("name", op.data.variance_name()),
-         ("token", op.data.token_id().to_string()),
-     ];
-     metrics::increment_counter!("process_tx_count", &labels);
+ if confirmed {
+     for op in &ops {
+         let labels = vec![
+             ("stage", "mempool".to_string()),
+             ("name", op.data.variance_name()),
+             ("token", op.data.token_id().to_string()),
+         ];
+         metrics::increment_counter!("process_tx_count", &labels);
+     }
}

Ok(())
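The counter is now emitted only for confirmed operations — presumably so the same operation is not counted twice when it is seen first as unconfirmed and again once confirmed (the commit does not state the rationale). A rough sketch of the gating, with Op and record_counter as hypothetical stand-ins for the real operation type and the metrics macro:

```rust
// Hypothetical sketch of gating per-tx counters on `confirmed`.
struct Op {
    name: &'static str,
    token_id: u32,
}

fn record_counter(name: &str, labels: &[(&str, String)]) {
    // Stand-in for `metrics::increment_counter!`; prints for illustration.
    println!("{name} {labels:?}");
}

fn count_processed(ops: &[Op], confirmed: bool) {
    if confirmed {
        for op in ops {
            let labels = vec![
                ("stage", "mempool".to_string()),
                ("name", op.name.to_string()),
                ("token", op.token_id.to_string()),
            ];
            record_counter("process_tx_count", &labels);
        }
    }
}

fn main() {
    let ops = [Op { name: "Transfer", token_id: 0 }];
    count_processed(&ops, false); // nothing emitted
    count_processed(&ops, true);  // one increment per op
}
```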
6 changes: 4 additions & 2 deletions core/lib/types/src/gas_counter.rs
@@ -99,10 +99,12 @@ impl VerifyCost {
pub const TRANSFER_TO_NEW_COST: u64 = 0;
pub const SWAP_COST: u64 = 0;
pub const FULL_EXIT_COST: u64 = 30_000;
- pub const WITHDRAW_COST: u64 = 48_000;
+ pub const WITHDRAW_COST: u64 = 30_000;
+ pub const PENDING_WITHDRAW_COST: u64 = 60_000;
pub const FORCED_EXIT_COST: u64 = Self::WITHDRAW_COST;
pub const MINT_NFT_COST: u64 = 0;
- pub const WITHDRAW_NFT_COST: u64 = 200_000;
+ pub const WITHDRAW_NFT_COST: u64 = 80_000;
+ pub const PENDING_WITHDRAW_NFT_COST: u64 = 240_000;

pub fn base_cost() -> U256 {
U256::from(Self::BASE_COST)
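These VerifyCost values feed the BASE_*_COST sums in fee_ticker/constants.rs shown earlier, which now add the new PENDING_* terms. A quick sanity check of the arithmetic — the CommitCost, AMORTIZED_COST_PER_CHUNK, and chunk-count values below are illustrative placeholders, not the repo's real constants:

```rust
// Worked example of the base-fee formula with the new VerifyCost split.
const VERIFY_WITHDRAW_COST: u64 = 30_000; // was 48_000 before this commit
const VERIFY_PENDING_WITHDRAW_COST: u64 = 60_000; // new constant
const COMMIT_WITHDRAW_COST: u64 = 50_000; // placeholder value
const AMORTIZED_COST_PER_CHUNK: u64 = 200; // placeholder value
const WITHDRAW_CHUNKS: u64 = 6; // placeholder value

fn main() {
    let base_withdraw_cost = VERIFY_WITHDRAW_COST
        + VERIFY_PENDING_WITHDRAW_COST
        + COMMIT_WITHDRAW_COST
        + AMORTIZED_COST_PER_CHUNK * WITHDRAW_CHUNKS;
    // The verify share is now 30_000 + 60_000 = 90_000 (up from 48_000), so
    // withdrawals are priced to cover the pending-withdrawal path as well.
    println!("BASE_WITHDRAW_COST = {base_withdraw_cost}");
}
```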
2 changes: 1 addition & 1 deletion core/tests/loadnext/src/main.rs
@@ -8,7 +8,7 @@ use loadnext::{config::LoadtestConfig, executor::Executor, report_collector::Loa

#[tokio::main]
async fn main() -> anyhow::Result<()> {
- vlog::init();
+ let _vlog_guard = vlog::init();

let config = LoadtestConfig::from_env().unwrap_or_else(|err| {
vlog::warn!(
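Binding the return value matters if vlog::init() returns a guard whose Drop flushes buffered log output — a common pattern in Rust logging stacks (this reading of vlog's API is an assumption). `let _vlog_guard = ...` keeps the guard alive until main returns, whereas a bare `vlog::init();` would drop it immediately:

```rust
// Sketch of the guard pattern; `FlushGuard` and `init` are hypothetical.
struct FlushGuard;

impl Drop for FlushGuard {
    fn drop(&mut self) {
        // A real guard would flush buffered log records here.
        println!("flushing logs");
    }
}

fn init() -> FlushGuard {
    FlushGuard
}

fn main() {
    // A bare `init();` would drop the guard (and flush) immediately;
    // binding it keeps logging alive for the whole program.
    let _vlog_guard = init();
    println!("running loadtest");
} // guard dropped here, logs flushed
```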
2 changes: 1 addition & 1 deletion docker-compose-runner.yml
@@ -34,8 +34,8 @@ services:
      - .:/usr/src/zksync
      - /usr/src/cache:/usr/src/cache
      - /usr/src/keys:/usr/src/keys
-     - /etc/sa_secret:/etc/sa_secret
      - /var/run/docker.sock:/var/run/docker.sock
+     - ${HOST_GCS_KEY_PATH:-/etc/sa_secret/sa_key.json}:/etc/sa_secret/sa_key.json
environment:
- IN_DOCKER=true
- CACHE_DIR=/usr/src/cache
3 changes: 2 additions & 1 deletion docker/data-restore/data-restore-entry.sh
@@ -6,7 +6,7 @@ function reset_db() {
cd core/lib/storage
psql "$DATABASE_URL" -c 'DROP OWNED BY CURRENT_USER CASCADE' || /bin/true
psql "$DATABASE_URL" -c 'DROP SCHEMA IF EXISTS public CASCADE' || /bin/true
psql "$DATABASE_URL" -c 'CREATE SCHEMA public'
psql "$DATABASE_URL" -c 'CREATE SCHEMA public' || /bin/true
diesel database setup
cd $ZKSYNC_HOME
}
@@ -87,4 +87,5 @@ else
fi

migrate

$ZKSYNC_HOME/target/release/zksync_data_restore $COMMAND $MODE --config $CONFIG_FILE --web3 $WEB3_URL || exit 1
