Merge dev
perekopskiy committed Aug 19, 2021
2 parents 4df5e30 + 36e8f38, commit c6716d6
Showing 211 changed files with 3,881 additions and 18,885 deletions.
11 changes: 10 additions & 1 deletion .github/workflows/deploy-zksync.yml
@@ -14,6 +14,9 @@ on:
deployment_id:
description: 'Deployment which triggers this pipeline (optional)'
required: false
scaleup:
description: 'Force scaling the server deployment up to 1 replica'
required: false

defaults:
run:
@@ -36,6 +39,7 @@ jobs:
namespace: ${{ steps.envMap.outputs.namespace }}
runner: ${{ steps.envMap.outputs.runner }}
jobOutcomes: ${{ steps.set.outputs.jobOutcomes }}
scaleup: ${{ github.event.inputs.scaleup == 'true' }}

steps:
- uses: actions/checkout@v2
@@ -84,7 +88,7 @@ jobs:
with:
workflow: Update Config
token: ${{ secrets.GH_TOKEN }}
wait-for-completion-timeout: 100s
wait-for-completion-timeout: 5m
wait-for-completion-interval: 20s
inputs: |
{
@@ -124,6 +128,11 @@ jobs:
run: |
UPDATE_REPOS=y helmfile -e $HFENV repos
helmfile -e $HFENV $DEPLOY_APPS apply --args "timeout 180s"
-
name: Scale Up
if: needs.setup.outputs.scaleup == 'true'
run: |
kubectl scale deployment -n ${{ needs.setup.outputs.namespace }} server --replicas=1
-
if: always()
name: Update deployment status
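The new `scaleup` input lets a caller force the server deployment back up to one replica as part of a deploy. A minimal sketch of dispatching the workflow with it set, assuming an authenticated GitHub CLI and that the workflow is addressed by its file name from the header above:

```sh
# Hypothetical manual dispatch of the deploy workflow with the new input.
# `deployment_id` is optional, so only `scaleup` is passed here.
gh workflow run deploy-zksync.yml -f scaleup=true
```

The loadtest pipeline below relies on exactly this hook: its Deploy step dispatches this workflow with `"scaleup": "true"` so the freshly configured server is running before the load test starts.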
125 changes: 79 additions & 46 deletions .github/workflows/loadtest.yml
@@ -8,18 +8,42 @@ env:
HF_ARGS: -e loadtest

jobs:
sqlproxy:
name: Provision SQLProxy
runs-on: [k8s, stage, deployer]
update:
name: Update Configs
runs-on: [k8s, deployer, stage]
steps:
- name: Update Config
uses: aurelien-baudet/workflow-dispatch@v2
with:
workflow: Update Config
token: ${{ secrets.GH_TOKEN }}
wait-for-completion-timeout: 5m
wait-for-completion-interval: 20s
inputs: |
{
"environment": "${{ env.ZKSYNC_ENV }}",
"ref": "master"
}
setup:
name: Setup and Provision SQLProxy
## stage selects the specific cluster
runs-on: [k8s, deployer, stage]
needs: [update]
container:
image: dysnix/kubectl:v1.19-gcloud
image: dysnix/kubectl:v1.20-gcloud

env:
KUBECONF: ${{ secrets.STAGE_KUBECONF }}
HF_DEPLOYS: -l name=sqlproxy
DEPLOY_APPS: -l name=sqlproxy

outputs:
image_tag: ${{ steps.set.outputs.shortRev }}

steps:
- uses: actions/checkout@v2
- id: set
run: echo "::set-output name=shortRev::$(git rev-parse --short HEAD)"
-
name: Clone helm-infra
uses: actions/checkout@v2
@@ -32,22 +56,24 @@ jobs:
name: Create ~/.kube/config
run: ./.github/scripts/write-kubeconf.sh
-
name: Setup helm and loadtest.env
run: |
.github/scripts/zksync-env.sh --kube loadtest > etc/env/loadtest.env
echo -n "ETH_SENDER_SENDER_OPERATOR_PRIVATE_KEY=" >> etc/env/loadtest.env
kubectl get secret -n loadtest server-env -o jsonpath='{.data.ETH_SENDER_SENDER_OPERATOR_PRIVATE_KEY}' | base64 -d >> etc/env/loadtest.env
-
name: Deploy SQLProxy
name: Provision SQLProxy
working-directory: helm-infra
run: |
UPDATE_REPOS=y helmfile $HF_ARGS repos
helmfile $HF_ARGS $HF_DEPLOYS sync
helmfile $HF_ARGS $DEPLOY_APPS sync
-
name: Create etc/env/loadtest.env
run: |
mkdir -p etc/env/
kubectl get cm -n ${ZKSYNC_ENV} server-env-custom -o json | jq -j '.data | to_entries | .[] | "\(.key)=\(.value)\n"' > etc/env/${ZKSYNC_ENV}.env
kubectl get secret -n ${ZKSYNC_ENV} secretenv-zksync -o go-template='{{range $k, $v := .data}}{{ printf "%s=%s\n" $k ($v | base64decode) }}{{end}}' \
>> etc/env/${ZKSYNC_ENV}.env
init:
dbsetup:
name: Cleanup the database, compile and update contracts
runs-on: [k8s, stage, deployer]
needs: sqlproxy
## stage selects the specific cluster
runs-on: [k8s, deployer, stage]
needs: [setup]
env:
RUSTUP_HOME: /usr/share/rust/.rustup
CARGO_HOME: /usr/share/rust/.cargo
@@ -71,7 +97,7 @@
-
name: Update cargo dependencies
run: |
cargo install --version=0.5.2 sqlx-cli
cargo install --version=0.5.6 sqlx-cli
cargo install diesel_cli --no-default-features --features postgres
-
name: Setup loadtest database
@@ -86,36 +112,41 @@
cd contracts && zk f yarn governance-add-erc20 add-multi-current-network rinkeby
zk db insert contract
deploy:
name: Deploy the apps
runs-on: [k8s, stage, deployer]
needs: init
update-configmap:
name: Deploy contracts
runs-on: [k8s, deployer, stage]
needs: [dbsetup]
container:
image: dysnix/kubectl:v1.19-gcloud
env:
KUBECONF: ${{ secrets.STAGE_KUBECONF }}
IMAGE_TAG: ${{ needs.pre.outputs.shortRev }}
HF_DEPLOYS: -l name=server -l name=prover
image: dysnix/kubectl:v1.20-gcloud
steps:
-
name: Create ~/.kube/config
run: .github/scripts/write-kubeconf.sh
-
name: Update contracts in the ConfigMap
run: .github/scripts/zksync-env.sh --update-from deployed_contracts.log
-
id: config
run: |
echo "::set-output name=shortRev::$(git rev-parse --short HEAD)"
-
name: Deploy apps
working-directory: helm-infra
run: |
UPDATE_REPOS=y helmfile $HF_ARGS repos
export IMAGE_TAG=${{ steps.config.outputs.shortRev }}
# Split apply into diff+sync (required for extra sync args), wait for server to settle
helmfile $HF_ARGS $HF_DEPLOYS diff
helmfile $HF_ARGS $HF_DEPLOYS sync --args "--timeout 180s" && sleep 60
mkdir -p etc/env/
kubectl get cm -n ${ZKSYNC_ENV} server-env-custom -o json | jq -j '.data | to_entries | .[] | "\(.key)=\(.value)\n"' > etc/env/${ZKSYNC_ENV}.env
cat deployed_contracts.log >> etc/env/${ZKSYNC_ENV}.env
kubectl delete configmap -n ${ZKSYNC_ENV} server-env-custom || /bin/true
kubectl create configmap -n ${ZKSYNC_ENV} server-env-custom --from-env-file=etc/env/${ZKSYNC_ENV}.env
deploy:
name: Deploy apps
runs-on: [k8s, deployer, stage]
needs: [update-configmap]
steps:
- name: Deploy
uses: aurelien-baudet/workflow-dispatch@v2
with:
workflow: Deploy
token: ${{ secrets.GH_TOKEN }}
wait-for-completion-timeout: 10m
wait-for-completion-interval: 1m
inputs: |
{
"environment": "${{ env.ZKSYNC_ENV }}",
"image_tag": "${{ needs.setup.outputs.image_tag }}",
"scaleup": "true"
}
loadtest:
name: Perform loadtest
@@ -138,15 +169,17 @@

cleanup:
name: Cleanup loadtest environment
runs-on: [k8s, stage, deployer]
needs: loadtest
if: always()
## stage selects the specific cluster
runs-on: [k8s, deployer, stage]
needs: [dbsetup, loadtest]
container:
image: dysnix/kubectl:v1.19-gcloud
image: dysnix/kubectl:v1.20-gcloud
env:
KUBECONF: ${{ secrets.STAGE_KUBECONF }}

if: always()
steps:
- run: rm -f etc/env/${ZKSYNC_ENV}.env
-
name: Create ~/.kube/config
run: .github/scripts/write-kubeconf.sh
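The trickiest part of this workflow is the ConfigMap round-trip: server env is flattened into an env file, decoded secrets and freshly deployed contract addresses are appended, and `server-env-custom` is recreated from the result. A small sketch for sanity-checking that round-trip by hand, assuming a kubeconfig already pointing at the stage cluster and the `loadtest` namespace used above:

```sh
# Reproduce the secret -> env-file extraction from the setup job, then
# confirm the recreated ConfigMap contains the expected keys.
NS=loadtest

# Decode every key of the secret into KEY=VALUE lines (same go-template
# the workflow uses; base64decode is built into kubectl's go-template).
kubectl get secret -n "$NS" secretenv-zksync \
  -o go-template='{{range $k, $v := .data}}{{ printf "%s=%s\n" $k ($v | base64decode) }}{{end}}' \
  > /tmp/"$NS".env

# List the keys of the recreated ConfigMap; contract addresses appended
# from deployed_contracts.log should appear once update-configmap has run.
kubectl get cm -n "$NS" server-env-custom -o json | jq '.data | keys'
```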
47 changes: 23 additions & 24 deletions contracts/contracts/AdditionalZkSync.sol
@@ -57,19 +57,18 @@ contract AdditionalZkSync is Storage, Config, Events, ReentrancyGuard {
require(!performedExodus[_accountId][_tokenId], "t"); // already exited
require(storedBlockHashes[totalBlocksExecuted] == hashStoredBlockInfo(_storedBlockInfo), "u"); // incorrect stored block info

bool proofCorrect =
verifier.verifyExitProof(
_storedBlockInfo.stateHash,
_accountId,
_owner,
_tokenId,
_amount,
_nftCreatorAccountId,
_nftCreatorAddress,
_nftSerialId,
_nftContentHash,
_proof
);
bool proofCorrect = verifier.verifyExitProof(
_storedBlockInfo.stateHash,
_accountId,
_owner,
_tokenId,
_amount,
_nftCreatorAccountId,
_nftCreatorAddress,
_nftSerialId,
_nftContentHash,
_proof
);
require(proofCorrect, "x");

if (_tokenId <= MAX_FUNGIBLE_TOKEN_ID) {
@@ -78,15 +77,14 @@ contract AdditionalZkSync is Storage, Config, Events, ReentrancyGuard {
emit WithdrawalPending(uint16(_tokenId), _amount);
} else {
require(_amount != 0, "Z"); // Unsupported nft amount
Operations.WithdrawNFT memory withdrawNftOp =
Operations.WithdrawNFT(
_nftCreatorAccountId,
_nftCreatorAddress,
_nftSerialId,
_nftContentHash,
_owner,
_tokenId
);
Operations.WithdrawNFT memory withdrawNftOp = Operations.WithdrawNFT(
_nftCreatorAccountId,
_nftCreatorAddress,
_nftSerialId,
_nftContentHash,
_owner,
_tokenId
);
pendingWithdrawnNFTs[_tokenId] = withdrawNftOp;
emit WithdrawalNFTPending(_tokenId);
}
@@ -121,8 +119,9 @@
function cutUpgradeNoticePeriod() external {
requireActive();

address payable[SECURITY_COUNCIL_MEMBERS_NUMBER] memory SECURITY_COUNCIL_MEMBERS =
[$(SECURITY_COUNCIL_MEMBERS)];
address payable[SECURITY_COUNCIL_MEMBERS_NUMBER] memory SECURITY_COUNCIL_MEMBERS = [
$(SECURITY_COUNCIL_MEMBERS)
];
for (uint256 id = 0; id < SECURITY_COUNCIL_MEMBERS_NUMBER; ++id) {
if (SECURITY_COUNCIL_MEMBERS[id] == msg.sender) {
require(upgradeStartTimestamp != 0);
9 changes: 4 additions & 5 deletions contracts/contracts/DeployFactory.sol
@@ -59,11 +59,10 @@ contract DeployFactory is TokenDeployInit {
// set this contract as governor
Proxy verifier = new Proxy(address(_verifierTarget), abi.encode());
AdditionalZkSync additionalZkSync = new AdditionalZkSync();
Proxy zkSync =
new Proxy(
address(_zksyncTarget),
abi.encode(address(governance), address(verifier), address(additionalZkSync), _genesisRoot)
);
Proxy zkSync = new Proxy(
address(_zksyncTarget),
abi.encode(address(governance), address(verifier), address(additionalZkSync), _genesisRoot)
);

UpgradeGatekeeper upgradeGatekeeper = new UpgradeGatekeeper(zkSync);

7 changes: 6 additions & 1 deletion contracts/contracts/Operations.sol
@@ -187,7 +187,12 @@ library Operations {

// ChangePubKey

enum ChangePubkeyType {ECRECOVER, CREATE2, OldECRECOVER, ECRECOVERV2}
enum ChangePubkeyType {
ECRECOVER,
CREATE2,
OldECRECOVER,
ECRECOVERV2
}

struct ChangePubKey {
// uint8 opType; -- present in pubdata, ignored at serialization
34 changes: 16 additions & 18 deletions contracts/contracts/PlonkCore.sol
@@ -981,14 +981,13 @@ contract Plonk4VerifierWithAccessToDNext {
uint256[] memory individual_vks_inputs,
uint256[16] memory subproofs_limbs
) internal view returns (bool) {
(uint256 recursive_input, PairingsBn254.G1Point[2] memory aggregated_g1s) =
reconstruct_recursive_public_input(
recursive_vks_root,
max_valid_index,
recursive_vks_indexes,
individual_vks_inputs,
subproofs_limbs
);
(uint256 recursive_input, PairingsBn254.G1Point[2] memory aggregated_g1s) = reconstruct_recursive_public_input(
recursive_vks_root,
max_valid_index,
recursive_vks_indexes,
individual_vks_inputs,
subproofs_limbs
);

assert(recursive_input == proof.input_values[0]);

Expand Down Expand Up @@ -1188,16 +1187,15 @@ contract VerifierWithDeserialize is Plonk4VerifierWithAccessToDNext {

Proof memory proof = deserialize_proof(public_inputs, serialized_proof);

bool valid =
verify_recursive(
proof,
vk,
recursive_vks_root,
max_valid_index,
recursive_vks_indexes,
individual_vks_inputs,
subproofs_limbs
);
bool valid = verify_recursive(
proof,
vk,
recursive_vks_root,
max_valid_index,
recursive_vks_indexes,
individual_vks_inputs,
subproofs_limbs
);

return valid;
}