Merge branch 'dev' into vb-remove-duplicate-code-rest-api
dvush authored Sep 11, 2020
2 parents e3bb777 + 02b0a45 commit c859c79
Showing 11 changed files with 299 additions and 38 deletions.
26 changes: 15 additions & 11 deletions core/circuit/src/witness/tests/test_utils.rs
@@ -66,18 +66,22 @@ impl PlasmaStateGenerator {
.map(|acc| (acc.id, acc.account.clone()))
.collect();

if accounts.iter().any(|(id, _)| *id == FEE_ACCOUNT_ID) {
panic!("AccountId {} is an existing fee account", FEE_ACCOUNT_ID);
}

let validator_accounts = std::iter::once((
FEE_ACCOUNT_ID,
Account::default_with_address(&Address::default()),
))
.chain(accounts)
.collect();
let accounts = if accounts.iter().any(|(id, _)| *id == FEE_ACCOUNT_ID) {
println!(
"Note: AccountId {} is an existing fee account",
FEE_ACCOUNT_ID
);
accounts.into_iter().collect()
} else {
std::iter::once((
FEE_ACCOUNT_ID,
Account::default_with_address(&Address::default()),
))
.chain(accounts)
.collect()
};

Self::create_state(validator_accounts)
Self::create_state(accounts)
}
}

58 changes: 55 additions & 3 deletions core/models/src/merkle_tree/parallel_smt.rs
@@ -1,15 +1,18 @@
/// Sparse Merkle tree with batch updates
use super::hasher::Hasher;
use crate::node::Fr;
use crate::primitives::GetBits;
use fnv::FnvHashMap;
use crypto_exports::ff::{PrimeField, PrimeFieldRepr};

use fnv::FnvHashMap;
use serde::{Deserialize, Serialize};
use std::fmt::Debug;
use std::sync::{RwLock, RwLockReadGuard};

/// Nodes are indexed starting with index(root) = 0
/// To store the index, at least 2 * TREE_HEIGHT bits is required.
/// Wrapper-structure is used to avoid mixing up with `ItemIndex` on the type level.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
struct NodeIndex(pub u64);

/// Leaf index: 0 <= i < N.
@@ -99,7 +102,7 @@ where
}

/// Merkle Tree branch node.
#[derive(Debug, Clone)]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Node {
depth: Depth,
index: NodeIndex,
@@ -649,6 +652,55 @@ where
}
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SparseMerkleTreeSerializableCacheBN256 {
root: NodeRef,
nodes: Vec<Node>,
cache: Vec<(NodeIndex, [u8; 32])>,
}

impl<T, H> SparseMerkleTree<T, Fr, H>
where
T: GetBits,
H: Hasher<Fr>,
{
pub fn get_internals(&self) -> SparseMerkleTreeSerializableCacheBN256 {
SparseMerkleTreeSerializableCacheBN256 {
root: self.root,
nodes: self.nodes.clone(),
cache: self
.cache
.read()
.unwrap()
.iter()
.map(|(idx, fr)| {
let mut fr_bytes = [0u8; 32];
fr.into_repr()
.write_be(&mut fr_bytes[..])
.expect("Fr write error");
(*idx, fr_bytes)
})
.collect(),
}
}

pub fn set_internals(&mut self, internals: SparseMerkleTreeSerializableCacheBN256) {
self.root = internals.root;
self.nodes = internals.nodes;
self.cache = RwLock::new(
internals
.cache
.into_iter()
.map(|(idx, fr_bytes)| {
let mut fr_repr = <Fr as PrimeField>::Repr::default();
fr_repr.read_be(&fr_bytes[..]).expect("Fr read error");
(idx, Fr::from_repr(fr_repr).expect("Fr decode error"))
})
.collect(),
);
}
}

#[cfg(test)]
mod tests {
use super::*;
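The `get_internals` / `set_internals` pair added above is what the server uses to persist and restore the tree's hash cache as JSON. Below is a minimal sketch of that round trip, mirroring the usage in witness_generator.rs and state_keeper.rs further down; `CircuitAccountTree`, `account_tree_depth()` and `AccountMap` are names borrowed from elsewhere in the codebase, and the helper itself is illustrative rather than part of this commit.

// Illustrative only: rebuild a tree from committed accounts plus a cached
// serialization of its internals, then verify the restored root hash.
fn rebuild_tree_from_cache(
    accounts: AccountMap,          // assumed: the map returned by load_committed_state
    cache_json: serde_json::Value, // earlier output of serde_json::to_value(tree.get_internals())
    expected_root: Fr,             // root hash stored with the block
) -> Result<CircuitAccountTree, failure::Error> {
    let mut tree = CircuitAccountTree::new(account_tree_depth());
    // Leaves are inserted first, exactly as the witness generator does below...
    for (id, account) in accounts {
        tree.insert(id, account.into());
    }
    // ...then the cached internal nodes and hashes are restored on top of them.
    tree.set_internals(serde_json::from_value(cache_json)?);
    failure::ensure!(
        tree.root_hash() == expected_root,
        "restored account tree cache does not match the stored root hash"
    );
    Ok(tree)
}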
97 changes: 80 additions & 17 deletions core/server/src/prover_server/witness_generator.rs
@@ -21,6 +21,7 @@ use models::{
use plasma::state::CollectedFee;
use prover::prover_data::ProverData;
use std::time::Instant;
use storage::StorageProcessor;

/// The essential part of this structure is `maintain` function
/// which runs forever and adds data to the database.
@@ -87,27 +88,89 @@ impl WitnessGenerator {
Ok(block_info)
}

fn load_account_tree(
&self,
block: BlockNumber,
storage: &StorageProcessor,
) -> Result<CircuitAccountTree, failure::Error> {
let mut circuit_account_tree = CircuitAccountTree::new(account_tree_depth());

if let Some((cached_block, account_tree_cache)) =
storage.chain().block_schema().get_account_tree_cache()?
{
let (_, accounts) = storage
.chain()
.state_schema()
.load_committed_state(Some(block))?;
for (id, account) in accounts {
circuit_account_tree.insert(id, account.into());
}
circuit_account_tree.set_internals(serde_json::from_value(account_tree_cache)?);
if block != cached_block {
let (_, accounts) = storage
.chain()
.state_schema()
.load_committed_state(Some(block))?;
if let Some((_, account_updates)) = storage
.chain()
.state_schema()
.load_state_diff(block, Some(cached_block))?
{
let mut updated_accounts = account_updates
.into_iter()
.map(|(id, _)| id)
.collect::<Vec<_>>();
updated_accounts.sort();
updated_accounts.dedup();
for idx in updated_accounts {
circuit_account_tree
.insert(idx, accounts.get(&idx).cloned().unwrap_or_default().into());
}
}
circuit_account_tree.root_hash();
let account_tree_cache = circuit_account_tree.get_internals();
storage
.chain()
.block_schema()
.store_account_tree_cache(block, serde_json::to_value(account_tree_cache)?)?;
}
} else {
let (_, accounts) = storage
.chain()
.state_schema()
.load_committed_state(Some(block))?;
for (id, account) in accounts {
circuit_account_tree.insert(id, account.into());
}
circuit_account_tree.root_hash();
let account_tree_cache = circuit_account_tree.get_internals();
storage
.chain()
.block_schema()
.store_account_tree_cache(block, serde_json::to_value(account_tree_cache)?)?;
}

if block != 0 {
let storage_block = storage
.chain()
.block_schema()
.get_block(block)?
.expect("Block for witness generator must exist");
assert_eq!(
storage_block.new_root_hash,
circuit_account_tree.root_hash(),
"account tree root hash restored incorrectly"
);
}
Ok(circuit_account_tree)
}

fn prepare_witness_and_save_it(&self, block: Block) -> Result<(), failure::Error> {
let timer = Instant::now();
let storage = self.conn_pool.access_storage_fragile()?;
let (storage_block_number, mut accounts) = storage
.chain()
.state_schema()
.load_committed_state(Some(block.block_number - 1))
.map_err(|e| failure::format_err!("couldn't load committed state: {}", e))?;
trace!(
"Witness generator get state account {}s",
timer.elapsed().as_secs()
);
failure::ensure!(block.block_number == storage_block_number + 1);

let timer = Instant::now();
let mut circuit_account_tree = CircuitAccountTree::new(account_tree_depth());
for (id, account) in accounts.drain() {
circuit_account_tree.insert(id, account.into());
}
let mut circuit_account_tree = self.load_account_tree(block.block_number - 1, &storage)?;
trace!(
"Witness generator circuit tree insert {}s",
"Witness generator loading circuit account tree {}s",
timer.elapsed().as_secs()
);

66 changes: 63 additions & 3 deletions core/server/src/state_keeper.rs
@@ -158,15 +158,75 @@ impl PlasmaStateInitParams {
Ok(init_params)
}

fn load_from_db(&mut self, storage: &storage::StorageProcessor) -> Result<(), failure::Error> {
fn load_account_tree(
&mut self,
storage: &storage::StorageProcessor,
) -> Result<BlockNumber, failure::Error> {
let (verified_block, accounts) = storage.chain().state_schema().load_verified_state()?;
for (id, account) in accounts {
self.insert_account(id, account);
}

if let Some(account_tree_cache) = storage
.chain()
.block_schema()
.get_account_tree_cache_block(verified_block)?
{
self.tree
.set_internals(serde_json::from_value(account_tree_cache)?);
} else {
self.tree.root_hash();
let account_tree_cache = self.tree.get_internals();
storage.chain().block_schema().store_account_tree_cache(
verified_block,
serde_json::to_value(account_tree_cache)?,
)?;
}

let (block_number, accounts) = storage
.chain()
.state_schema()
.load_committed_state(None)
.map_err(|e| failure::format_err!("couldn't load committed state: {}", e))?;
for (account_id, account) in accounts.into_iter() {
self.insert_account(account_id, account);

if block_number != verified_block {
if let Some((_, account_updates)) = storage
.chain()
.state_schema()
.load_state_diff(verified_block, Some(block_number))?
{
let mut updated_accounts = account_updates
.into_iter()
.map(|(id, _)| id)
.collect::<Vec<_>>();
updated_accounts.sort();
updated_accounts.dedup();
for idx in updated_accounts {
if let Some(acc) = accounts.get(&idx).cloned() {
self.insert_account(idx, acc);
} else {
self.remove_account(idx);
}
}
}
}
if block_number != 0 {
let storage_root_hash = storage
.chain()
.block_schema()
.get_block(block_number)?
.expect("restored block must exist");
assert_eq!(
storage_root_hash.new_root_hash,
self.tree.root_hash(),
"restored root_hash is different"
);
}
Ok(block_number)
}

fn load_from_db(&mut self, storage: &storage::StorageProcessor) -> Result<(), failure::Error> {
let block_number = self.load_account_tree(storage)?;
self.last_block_number = block_number;
self.unprocessed_priority_op = Self::unprocessed_priority_op_id(&storage, block_number)?;

@@ -0,0 +1 @@
DROP TABLE IF EXISTS account_tree_cache;
@@ -0,0 +1,7 @@
-- Stored cache for account merkle tree.
CREATE TABLE account_tree_cache
(
block BIGINT REFERENCES blocks (number) ON UPDATE CASCADE ON DELETE CASCADE,
tree_cache jsonb NOT NULL,
PRIMARY KEY (block)
);
52 changes: 51 additions & 1 deletion core/storage/src/chain/block/mod.rs
@@ -13,7 +13,9 @@ use models::{
fe_from_bytes, fe_to_bytes, node::block::PendingBlock, Action, ActionType, Operation,
};
// Local imports
use self::records::{BlockDetails, BlockTransactionItem, StorageBlock, StoragePendingBlock};
use self::records::{
AccountTreeCache, BlockDetails, BlockTransactionItem, StorageBlock, StoragePendingBlock,
};
use crate::{
chain::{
operations::{
@@ -597,4 +599,52 @@ impl<'a> BlockSchema<'a> {
Ok(())
})
}

/// Stores account tree cache for a block
pub fn store_account_tree_cache(
&self,
block: BlockNumber,
tree_cache: serde_json::Value,
) -> QueryResult<()> {
use crate::schema::*;

if block == 0 {
return Ok(());
}

diesel::insert_into(account_tree_cache::table)
.values(&AccountTreeCache {
block: block as i64,
tree_cache,
})
.on_conflict(account_tree_cache::block)
.do_nothing()
.execute(self.0.conn())
.map(drop)
}

/// Gets the most recently stored account tree cache, if any
pub fn get_account_tree_cache(&self) -> QueryResult<Option<(BlockNumber, serde_json::Value)>> {
use crate::schema::*;
let account_tree_cache = account_tree_cache::table
.order_by(account_tree_cache::block.desc())
.first::<AccountTreeCache>(self.0.conn())
.optional()?;

Ok(account_tree_cache.map(|w| (w.block as BlockNumber, w.tree_cache)))
}

/// Gets stored account tree cache for a block
pub fn get_account_tree_cache_block(
&self,
block: BlockNumber,
) -> QueryResult<Option<serde_json::Value>> {
use crate::schema::*;
let account_tree_cache = account_tree_cache::table
.filter(account_tree_cache::block.eq(block as i64))
.first::<AccountTreeCache>(self.0.conn())
.optional()?;

Ok(account_tree_cache.map(|w| w.tree_cache))
}
}
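Taken together, the callers elsewhere in this commit use these three methods as sketched below: the witness generator warm-starts from the most recent cache, while the state keeper looks up the cache for its verified block. `storage` is assumed to be a `StorageProcessor`, and the helper is illustrative only.

// Illustrative only: store a cache entry, then read it back both ways.
fn account_tree_cache_round_trip(
    storage: &StorageProcessor,
    block: BlockNumber,
    tree_cache: serde_json::Value, // serialized SparseMerkleTreeSerializableCacheBN256
) -> QueryResult<()> {
    // A no-op for block 0 and for already-cached blocks, as implemented above.
    storage
        .chain()
        .block_schema()
        .store_account_tree_cache(block, tree_cache)?;

    // Most recently cached block, if any (used by the prover's witness generator).
    let _latest: Option<(BlockNumber, serde_json::Value)> =
        storage.chain().block_schema().get_account_tree_cache()?;

    // Cache for one specific block, if stored (used by the state keeper).
    let _for_block: Option<serde_json::Value> = storage
        .chain()
        .block_schema()
        .get_account_tree_cache_block(block)?;

    Ok(())
}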
7 changes: 7 additions & 0 deletions core/storage/src/chain/block/records.rs
@@ -71,3 +71,10 @@ pub struct BlockTransactionItem {
#[sql_type = "Timestamp"]
pub created_at: NaiveDateTime,
}

#[derive(Debug, Clone, Insertable, Queryable, QueryableByName, Serialize, Deserialize)]
#[table_name = "account_tree_cache"]
pub struct AccountTreeCache {
pub block: i64,
pub tree_cache: serde_json::Value,
}
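For reference, a tiny sketch of how one row of `account_tree_cache` is assembled; the JSON payload is assumed to be the serialized `SparseMerkleTreeSerializableCacheBN256` from parallel_smt.rs, and `CircuitAccountTree` is the tree type used elsewhere in this commit.

// Illustrative only: building the record that store_account_tree_cache inserts.
fn cache_row(block: i64, tree: &CircuitAccountTree) -> Result<AccountTreeCache, failure::Error> {
    Ok(AccountTreeCache {
        block,                                                   // references blocks(number)
        tree_cache: serde_json::to_value(tree.get_internals())?, // jsonb column
    })
}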