diff --git a/crates/indexer/migrations/2023-04-28-053048_object_token_v2/down.sql b/crates/indexer/migrations/2023-04-28-053048_object_token_v2/down.sql new file mode 100644 index 0000000000000..bce48fd6ab577 --- /dev/null +++ b/crates/indexer/migrations/2023-04-28-053048_object_token_v2/down.sql @@ -0,0 +1,36 @@ +-- This file should undo anything in `up.sql` +DROP TABLE IF EXISTS objects; +DROP INDEX IF EXISTS o_owner_idx; +DROP INDEX IF EXISTS o_object_skh_idx; +DROP INDEX IF EXISTS o_skh_idx; +DROP INDEX IF EXISTS o_insat_idx; +DROP TABLE IF EXISTS current_objects; +DROP INDEX IF EXISTS co_owner_idx; +DROP INDEX IF EXISTS co_object_skh_idx; +DROP INDEX IF EXISTS co_skh_idx; +DROP INDEX IF EXISTS co_insat_idx; +ALTER TABLE move_resources DROP COLUMN IF EXISTS state_key_hash; +DROP TABLE IF EXISTS token_ownerships_v2; +DROP INDEX IF EXISTS to2_id_index; +DROP INDEX IF EXISTS to2_owner_index; +DROP INDEX IF EXISTS to2_insat_index; +DROP TABLE IF EXISTS current_token_ownerships_v2; +DROP INDEX IF EXISTS curr_to2_owner_index; +DROP INDEX IF EXISTS curr_to2_wa_index; +DROP INDEX IF EXISTS curr_to2_insat_index; +DROP TABLE IF EXISTS collections_v2; +DROP INDEX IF EXISTS col2_id_index; +DROP INDEX IF EXISTS col2_crea_cn_index; +DROP INDEX IF EXISTS col2_insat_index; +DROP TABLE IF EXISTS current_collections_v2; +DROP INDEX IF EXISTS cur_col2_crea_cn_index; +DROP INDEX IF EXISTS cur_col2_insat_index; +DROP TABLE IF EXISTS token_datas_v2; +DROP INDEX IF EXISTS td2_id_index; +DROP INDEX IF EXISTS td2_cid_name_index; +DROP INDEX IF EXISTS td2_insat_index; +DROP TABLE IF EXISTS current_token_datas_v2; +DROP INDEX IF EXISTS cur_td2_cid_name_index; +DROP INDEX IF EXISTS cur_td2_insat_index; +ALTER TABLE current_token_pending_claims DROP COLUMN IF EXISTS token_data_id; +ALTER TABLE current_token_pending_claims DROP COLUMN IF EXISTS collection_id; \ No newline at end of file diff --git a/crates/indexer/migrations/2023-04-28-053048_object_token_v2/up.sql b/crates/indexer/migrations/2023-04-28-053048_object_token_v2/up.sql new file mode 100644 index 0000000000000..bf8f5f4d7929e --- /dev/null +++ b/crates/indexer/migrations/2023-04-28-053048_object_token_v2/up.sql @@ -0,0 +1,170 @@ +-- Your SQL goes here +-- objects, basically normalizing ObjectCore +CREATE TABLE IF NOT EXISTS objects ( + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + object_address VARCHAR(66) NOT NULL, + owner_address VARCHAR(66), + state_key_hash VARCHAR(66) NOT NULL, + guid_creation_num NUMERIC, + allow_ungated_transfer BOOLEAN, + is_deleted BOOLEAN NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + -- constraints + PRIMARY KEY (transaction_version, write_set_change_index) +); +CREATE INDEX IF NOT EXISTS o_owner_idx ON objects (owner_address); +CREATE INDEX IF NOT EXISTS o_object_skh_idx ON objects (object_address, state_key_hash); +CREATE INDEX IF NOT EXISTS o_skh_idx ON objects (state_key_hash); +CREATE INDEX IF NOT EXISTS o_insat_idx ON objects (inserted_at); +-- latest instance of objects +CREATE TABLE IF NOT EXISTS current_objects ( + object_address VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, + owner_address VARCHAR(66) NOT NULL, + state_key_hash VARCHAR(66) NOT NULL, + allow_ungated_transfer BOOLEAN NOT NULL, + last_guid_creation_num NUMERIC NOT NULL, + last_transaction_version BIGINT NOT NULL, + is_deleted BOOLEAN NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX IF NOT EXISTS co_owner_idx ON current_objects (owner_address); +CREATE INDEX IF NOT EXISTS 
co_object_skh_idx ON current_objects (object_address, state_key_hash); +CREATE INDEX IF NOT EXISTS co_skh_idx ON current_objects (state_key_hash); +CREATE INDEX IF NOT EXISTS co_insat_idx ON current_objects (inserted_at); +-- Add this so that we can find resource groups by their state_key_hash +ALTER TABLE move_resources +ADD COLUMN IF NOT EXISTS state_key_hash VARCHAR(66) NOT NULL DEFAULT ''; +-- NFT stuff +-- tracks who owns tokens +CREATE TABLE IF NOT EXISTS token_ownerships_v2 ( + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + token_data_id VARCHAR(66) NOT NULL, + property_version_v1 NUMERIC NOT NULL, + owner_address VARCHAR(66), + storage_id VARCHAR(66) NOT NULL, + amount NUMERIC NOT NULL, + table_type_v1 VARCHAR(66), + token_properties_mutated_v1 JSONB, + is_soulbound_v2 BOOLEAN, + token_standard VARCHAR(10) NOT NULL, + is_fungible_v2 BOOLEAN, + transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + PRIMARY KEY (transaction_version, write_set_change_index) +); +CREATE INDEX IF NOT EXISTS to2_id_index ON token_ownerships_v2 (token_data_id); +CREATE INDEX IF NOT EXISTS to2_owner_index ON token_ownerships_v2 (owner_address); +CREATE INDEX IF NOT EXISTS to2_insat_index ON token_ownerships_v2 (inserted_at); +CREATE TABLE IF NOT EXISTS current_token_ownerships_v2 ( + token_data_id VARCHAR(66) NOT NULL, + property_version_v1 NUMERIC NOT NULL, + owner_address VARCHAR(66) NOT NULL, + storage_id VARCHAR(66) NOT NULL, + amount NUMERIC NOT NULL, + table_type_v1 VARCHAR(66), + token_properties_mutated_v1 JSONB, + is_soulbound_v2 BOOLEAN, + token_standard VARCHAR(10) NOT NULL, + is_fungible_v2 BOOLEAN, + last_transaction_version BIGINT NOT NULL, + last_transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + PRIMARY KEY ( + token_data_id, + property_version_v1, + owner_address, + storage_id + ) +); +CREATE INDEX IF NOT EXISTS curr_to2_owner_index ON current_token_ownerships_v2 (owner_address); +CREATE INDEX IF NOT EXISTS curr_to2_wa_index ON current_token_ownerships_v2 (storage_id); +CREATE INDEX IF NOT EXISTS curr_to2_insat_index ON current_token_ownerships_v2 (inserted_at); +-- tracks collections +CREATE TABLE IF NOT EXISTS collections_v2 ( + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + collection_id VARCHAR(66) NOT NULL, + creator_address VARCHAR(66) NOT NULL, + collection_name VARCHAR(128) NOT NULL, + description TEXT NOT NULL, + uri VARCHAR(512) NOT NULL, + current_supply NUMERIC NOT NULL, + max_supply NUMERIC, + total_minted_v2 NUMERIC, + mutable_description BOOLEAN, + mutable_uri BOOLEAN, + table_handle_v1 VARCHAR(66), + token_standard VARCHAR(10) NOT NULL, + transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + PRIMARY KEY (transaction_version, write_set_change_index) +); +CREATE INDEX IF NOT EXISTS col2_id_index ON collections_v2 (collection_id); +CREATE INDEX IF NOT EXISTS col2_crea_cn_index ON collections_v2 (creator_address, collection_name); +CREATE INDEX IF NOT EXISTS col2_insat_index ON collections_v2 (inserted_at); +CREATE TABLE IF NOT EXISTS current_collections_v2 ( + collection_id VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, + creator_address VARCHAR(66) NOT NULL, + collection_name VARCHAR(128) NOT NULL, + description TEXT NOT NULL, + uri VARCHAR(512) NOT NULL, + current_supply NUMERIC NOT NULL, + max_supply NUMERIC, + total_minted_v2 NUMERIC, + mutable_description BOOLEAN, + mutable_uri BOOLEAN, + 
table_handle_v1 VARCHAR(66), + token_standard VARCHAR(10) NOT NULL, + last_transaction_version BIGINT NOT NULL, + last_transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX IF NOT EXISTS cur_col2_crea_cn_index ON current_collections_v2 (creator_address, collection_name); +CREATE INDEX IF NOT EXISTS cur_col2_insat_index ON current_collections_v2 (inserted_at); +-- tracks token metadata +CREATE TABLE IF NOT EXISTS token_datas_v2 ( + transaction_version BIGINT NOT NULL, + write_set_change_index BIGINT NOT NULL, + token_data_id VARCHAR(66) NOT NULL, + collection_id VARCHAR(66) NOT NULL, + token_name VARCHAR(128) NOT NULL, + maximum NUMERIC, + supply NUMERIC NOT NULL, + largest_property_version_v1 NUMERIC, + token_uri VARCHAR(512) NOT NULL, + token_properties JSONB NOT NULL, + description TEXT NOT NULL, + token_standard VARCHAR(10) NOT NULL, + is_fungible_v2 BOOLEAN, + transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW(), + PRIMARY KEY (transaction_version, write_set_change_index) +); +CREATE INDEX IF NOT EXISTS td2_id_index ON token_datas_v2 (token_data_id); +CREATE INDEX IF NOT EXISTS td2_cid_name_index ON token_datas_v2 (collection_id, token_name); +CREATE INDEX IF NOT EXISTS td2_insat_index ON token_datas_v2 (inserted_at); +CREATE TABLE IF NOT EXISTS current_token_datas_v2 ( + token_data_id VARCHAR(66) UNIQUE PRIMARY KEY NOT NULL, + collection_id VARCHAR(66) NOT NULL, + token_name VARCHAR(128) NOT NULL, + maximum NUMERIC, + supply NUMERIC NOT NULL, + largest_property_version_v1 NUMERIC, + token_uri VARCHAR(512) NOT NULL, + description TEXT NOT NULL, + token_properties JSONB NOT NULL, + token_standard VARCHAR(10) NOT NULL, + is_fungible_v2 BOOLEAN, + last_transaction_version BIGINT NOT NULL, + last_transaction_timestamp TIMESTAMP NOT NULL, + inserted_at TIMESTAMP NOT NULL DEFAULT NOW() +); +CREATE INDEX IF NOT EXISTS cur_td2_cid_name_index ON current_token_datas_v2 (collection_id, token_name); +CREATE INDEX IF NOT EXISTS cur_td2_insat_index ON current_token_datas_v2 (inserted_at); +-- Add ID (with 0x prefix) +ALTER TABLE current_token_pending_claims +ADD COLUMN IF NOT EXISTS token_data_id VARCHAR(66) NOT NULL DEFAULT ''; +ALTER TABLE current_token_pending_claims +ADD COLUMN IF NOT EXISTS collection_id VARCHAR(66) NOT NULL DEFAULT ''; \ No newline at end of file diff --git a/crates/indexer/src/models/coin_models/coin_activities.rs b/crates/indexer/src/models/coin_models/coin_activities.rs index 318b880cf8c48..a5f03c59dcf6b 100644 --- a/crates/indexer/src/models/coin_models/coin_activities.rs +++ b/crates/indexer/src/models/coin_models/coin_activities.rs @@ -29,7 +29,7 @@ const GAS_FEE_EVENT: &str = "0x1::aptos_coin::GasFeeEvent"; // We will never have a negative number on chain so this will avoid collision in postgres const BURN_GAS_EVENT_CREATION_NUM: i64 = -1; const BURN_GAS_EVENT_INDEX: i64 = -1; -const MAX_ENTRY_FUNCTION_LENGTH: usize = 100; +pub const MAX_ENTRY_FUNCTION_LENGTH: usize = 100; type OwnerAddress = String; type CoinType = String; diff --git a/crates/indexer/src/models/coin_models/coin_utils.rs b/crates/indexer/src/models/coin_models/coin_utils.rs index 3334331e8413e..27c6bd7ad8466 100644 --- a/crates/indexer/src/models/coin_models/coin_utils.rs +++ b/crates/indexer/src/models/coin_models/coin_utils.rs @@ -75,7 +75,7 @@ pub struct IntegerWrapperResource { impl IntegerWrapperResource { /// In case we do want to track supply - pub fn _get_supply(&self) -> Option { + pub fn get_supply(&self) 
-> Option { self.vec.get(0).map(|inner| inner.value.clone()) } } diff --git a/crates/indexer/src/models/coin_models/mod.rs b/crates/indexer/src/models/coin_models/mod.rs index 2c799367e5ddb..c748a32a04e99 100644 --- a/crates/indexer/src/models/coin_models/mod.rs +++ b/crates/indexer/src/models/coin_models/mod.rs @@ -5,4 +5,4 @@ pub mod coin_activities; pub mod coin_balances; pub mod coin_infos; pub mod coin_supply; -mod coin_utils; +pub mod coin_utils; diff --git a/crates/indexer/src/models/mod.rs b/crates/indexer/src/models/mod.rs index 8b8eb957df8b7..715e4070638ca 100644 --- a/crates/indexer/src/models/mod.rs +++ b/crates/indexer/src/models/mod.rs @@ -16,4 +16,5 @@ pub mod stake_models; pub mod token_models; pub mod transactions; pub mod user_transactions; +pub mod v2_objects; pub mod write_set_changes; diff --git a/crates/indexer/src/models/move_resources.rs b/crates/indexer/src/models/move_resources.rs index 8d08ee9206129..acdb30a0e17fe 100644 --- a/crates/indexer/src/models/move_resources.rs +++ b/crates/indexer/src/models/move_resources.rs @@ -23,6 +23,7 @@ pub struct MoveResource { pub generic_type_params: Option, pub data: Option, pub is_deleted: bool, + pub state_key_hash: String, } pub struct MoveStructTag { @@ -50,6 +51,7 @@ impl MoveResource { generic_type_params: parsed_data.generic_type_params, data: Some(serde_json::to_value(&write_resource.data.data).unwrap()), is_deleted: false, + state_key_hash: standardize_address(write_resource.state_key_hash.as_str()), } } @@ -71,6 +73,7 @@ impl MoveResource { generic_type_params: parsed_data.generic_type_params, data: None, is_deleted: true, + state_key_hash: standardize_address(delete_resource.state_key_hash.as_str()), } } diff --git a/crates/indexer/src/models/property_map.rs b/crates/indexer/src/models/property_map.rs index 548c7c48f6b67..eb06db07fbe14 100644 --- a/crates/indexer/src/models/property_map.rs +++ b/crates/indexer/src/models/property_map.rs @@ -52,3 +52,53 @@ impl PropertyMap { serde_json::to_value(map).unwrap() } } + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TokenObjectPropertyValue { + value: String, + typ: u8, +} + +pub fn create_token_object_property_value( + typ: u8, + value: String, +) -> Result { + Ok(TokenObjectPropertyValue { + value: util::convert_bcs_hex_new(typ, value.clone()).unwrap_or(value), + typ, + }) +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TokenObjectPropertyMap { + data: HashMap, +} + +impl TokenObjectPropertyMap { + /// Deserializes PropertyValue from bcs encoded json + pub fn from_bcs_encode_str(val: Value) -> Option { + let mut pm = TokenObjectPropertyMap { + data: HashMap::new(), + }; + let records: &Vec = val.get("data")?.as_array()?; + for entry in records { + let key = entry.get("key")?.as_str()?; + let val = entry.get("value")?.get("value")?.as_str()?; + let typ = entry.get("value")?.get("type")?.as_u64()?; + let pv = create_token_object_property_value(typ as u8, val.to_string()).ok()?; + pm.data.insert(key.to_string(), pv); + } + Some(Self::to_flat_json_new(pm)) + } + + /// Flattens PropertyMap which can't be easily consumable by downstream. 
+ /// For example: Object {"data": Object {"creation_time_sec": Object {"value": String("1666125588")}}} + /// becomes Object {"creation_time_sec": "1666125588"} + fn to_flat_json_new(val: TokenObjectPropertyMap) -> Value { + let mut map = HashMap::new(); + for (k, v) in val.data { + map.insert(k, v.value); + } + serde_json::to_value(map).unwrap() + } +} diff --git a/crates/indexer/src/models/token_models/collection_datas.rs b/crates/indexer/src/models/token_models/collection_datas.rs index 7e85329f262b0..c07900624f6b5 100644 --- a/crates/indexer/src/models/token_models/collection_datas.rs +++ b/crates/indexer/src/models/token_models/collection_datas.rs @@ -21,8 +21,8 @@ use diesel::{prelude::*, ExpressionMethods}; use field_count::FieldCount; use serde::{Deserialize, Serialize}; -const QUERY_RETRIES: u32 = 5; -const QUERY_RETRY_DELAY_MS: u64 = 500; +pub const QUERY_RETRIES: u32 = 5; +pub const QUERY_RETRY_DELAY_MS: u64 = 500; #[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] #[diesel(primary_key(collection_data_id_hash, transaction_version))] #[diesel(table_name = collection_datas)] diff --git a/crates/indexer/src/models/token_models/mod.rs b/crates/indexer/src/models/token_models/mod.rs index 81a879e1fea68..55a82814d2fb9 100644 --- a/crates/indexer/src/models/token_models/mod.rs +++ b/crates/indexer/src/models/token_models/mod.rs @@ -10,3 +10,7 @@ pub mod token_datas; pub mod token_ownerships; pub mod token_utils; pub mod tokens; +pub mod v2_collections; +pub mod v2_token_datas; +pub mod v2_token_ownerships; +pub mod v2_token_utils; diff --git a/crates/indexer/src/models/token_models/token_claims.rs b/crates/indexer/src/models/token_models/token_claims.rs index 83baf8ce47f62..bced70af52cac 100644 --- a/crates/indexer/src/models/token_models/token_claims.rs +++ b/crates/indexer/src/models/token_models/token_claims.rs @@ -28,6 +28,8 @@ pub struct CurrentTokenPendingClaim { pub table_handle: String, pub last_transaction_version: i64, pub last_transaction_timestamp: chrono::NaiveDateTime, + pub token_data_id: String, + pub collection_id: String, } impl CurrentTokenPendingClaim { @@ -65,11 +67,15 @@ impl CurrentTokenPendingClaim { if let Some(table_metadata) = maybe_table_metadata { let token_id = offer.token_id; - let token_data_id = token_id.token_data_id; - let collection_data_id_hash = token_data_id.get_collection_data_id_hash(); - let token_data_id_hash = token_data_id.to_hash(); - let collection_name = token_data_id.get_collection_trunc(); - let name = token_data_id.get_name_trunc(); + let token_data_id_struct = token_id.token_data_id; + let collection_data_id_hash = + token_data_id_struct.get_collection_data_id_hash(); + let token_data_id_hash = token_data_id_struct.to_hash(); + // Basically adding 0x prefix to the previous 2 lines. 
This is to be consistent with Token V2 + let collection_id = token_data_id_struct.get_collection_id(); + let token_data_id = token_data_id_struct.to_id(); + let collection_name = token_data_id_struct.get_collection_trunc(); + let name = token_data_id_struct.get_name_trunc(); return Ok(Some(Self { token_data_id_hash, @@ -77,13 +83,15 @@ impl CurrentTokenPendingClaim { from_address: standardize_address(&table_metadata.owner_address), to_address: standardize_address(&offer.to_addr), collection_data_id_hash, - creator_address: standardize_address(&token_data_id.creator), + creator_address: standardize_address(&token_data_id_struct.creator), collection_name, name, amount: token.amount, table_handle, last_transaction_version: txn_version, last_transaction_timestamp: txn_timestamp, + token_data_id, + collection_id, })); } else { aptos_logger::warn!( @@ -133,11 +141,14 @@ impl CurrentTokenPendingClaim { }); let token_id = offer.token_id; - let token_data_id = token_id.token_data_id; - let collection_data_id_hash = token_data_id.get_collection_data_id_hash(); - let token_data_id_hash = token_data_id.to_hash(); - let collection_name = token_data_id.get_collection_trunc(); - let name = token_data_id.get_name_trunc(); + let token_data_id_struct = token_id.token_data_id; + let collection_data_id_hash = token_data_id_struct.get_collection_data_id_hash(); + let token_data_id_hash = token_data_id_struct.to_hash(); + // Basically adding 0x prefix to the previous 2 lines. This is to be consistent with Token V2 + let collection_id = token_data_id_struct.get_collection_id(); + let token_data_id = token_data_id_struct.to_id(); + let collection_name = token_data_id_struct.get_collection_trunc(); + let name = token_data_id_struct.get_name_trunc(); return Ok(Some(Self { token_data_id_hash, @@ -145,13 +156,15 @@ impl CurrentTokenPendingClaim { from_address: standardize_address(&table_metadata.owner_address), to_address: standardize_address(&offer.to_addr), collection_data_id_hash, - creator_address: standardize_address(&token_data_id.creator), + creator_address: standardize_address(&token_data_id_struct.creator), collection_name, name, amount: BigDecimal::zero(), table_handle, last_transaction_version: txn_version, last_transaction_timestamp: txn_timestamp, + token_data_id, + collection_id, })); } Ok(None) diff --git a/crates/indexer/src/models/token_models/token_utils.rs b/crates/indexer/src/models/token_models/token_utils.rs index 78b663dad5157..267474131dcf5 100644 --- a/crates/indexer/src/models/token_models/token_utils.rs +++ b/crates/indexer/src/models/token_models/token_utils.rs @@ -14,8 +14,8 @@ use bigdecimal::BigDecimal; use serde::{Deserialize, Serialize}; use std::fmt::{self, Formatter}; -const NAME_LENGTH: usize = 128; -const URI_LENGTH: usize = 512; +pub const NAME_LENGTH: usize = 128; +pub const URI_LENGTH: usize = 512; /** * This file defines deserialized move types as defined in our 0x3 contracts. 
*/ @@ -33,6 +33,10 @@ pub struct TokenDataIdType { } impl TokenDataIdType { + pub fn to_id(&self) -> String { + format!("0x{}", self.to_hash()) + } + pub fn to_hash(&self) -> String { hash_str(&self.to_string()) } @@ -48,6 +52,10 @@ impl TokenDataIdType { pub fn get_collection_data_id_hash(&self) -> String { CollectionDataIdType::new(self.creator.clone(), self.collection.clone()).to_hash() } + + pub fn get_collection_id(&self) -> String { + CollectionDataIdType::new(self.creator.clone(), self.collection.clone()).to_id() + } } impl fmt::Display for TokenDataIdType { @@ -77,6 +85,10 @@ impl CollectionDataIdType { hash_str(&self.to_string()) } + pub fn to_id(&self) -> String { + format!("0x{}", self.to_hash()) + } + pub fn get_name_trunc(&self) -> String { truncate_str(&self.name, NAME_LENGTH) } diff --git a/crates/indexer/src/models/token_models/v2_collections.rs b/crates/indexer/src/models/token_models/v2_collections.rs new file mode 100644 index 0000000000000..82b8c9c65f4e8 --- /dev/null +++ b/crates/indexer/src/models/token_models/v2_collections.rs @@ -0,0 +1,300 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::{ + token_utils::{CollectionDataIdType, TokenWriteSet}, + tokens::TableHandleToOwner, + v2_token_utils::{TokenStandard, TokenV2AggregatedDataMapping, V2TokenResource}, +}; +use crate::{ + database::PgPoolConnection, + models::move_resources::MoveResource, + schema::{collections_v2, current_collections_v2}, + util::standardize_address, +}; +use anyhow::Context; +use aptos_api_types::{WriteResource as APIWriteResource, WriteTableItem as APIWriteTableItem}; +use bigdecimal::{BigDecimal, Zero}; +use diesel::{prelude::*, sql_query, sql_types::Text}; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +// PK of current_collections_v2, i.e. 
collection_id +pub type CurrentCollectionV2PK = String; + +const QUERY_RETRIES: u32 = 5; +const QUERY_RETRY_DELAY_MS: u64 = 500; + +#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, write_set_change_index))] +#[diesel(table_name = collections_v2)] +pub struct CollectionV2 { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub collection_id: String, + pub creator_address: String, + pub collection_name: String, + pub description: String, + pub uri: String, + pub current_supply: BigDecimal, + pub max_supply: Option, + pub total_minted_v2: Option, + pub mutable_description: Option, + pub mutable_uri: Option, + pub table_handle_v1: Option, + pub token_standard: String, + pub transaction_timestamp: chrono::NaiveDateTime, +} + +#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(collection_id))] +#[diesel(table_name = current_collections_v2)] +pub struct CurrentCollectionV2 { + pub collection_id: String, + pub creator_address: String, + pub collection_name: String, + pub description: String, + pub uri: String, + pub current_supply: BigDecimal, + pub max_supply: Option, + pub total_minted_v2: Option, + pub mutable_description: Option, + pub mutable_uri: Option, + pub table_handle_v1: Option, + pub token_standard: String, + pub last_transaction_version: i64, + pub last_transaction_timestamp: chrono::NaiveDateTime, +} + +#[derive(Debug, QueryableByName)] +pub struct CreatorFromCollectionTableV1 { + #[diesel(sql_type = Text)] + pub creator_address: String, +} + +impl CollectionV2 { + pub fn get_v2_from_write_resource( + write_resource: &APIWriteResource, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + token_v2_metadata: &TokenV2AggregatedDataMapping, + ) -> anyhow::Result> { + let type_str = format!( + "{}::{}::{}", + write_resource.data.typ.address, + write_resource.data.typ.module, + write_resource.data.typ.name + ); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2TokenResource::Collection(inner) = + V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? 
+ { + let (mut current_supply, mut max_supply, mut total_minted_v2) = + (BigDecimal::zero(), None, None); + let (mut mutable_description, mut mutable_uri) = (None, None); + if let Some(metadata) = token_v2_metadata.get(&resource.address) { + // Getting supply data (prefer fixed supply over unlimited supply although they should never appear at the same time anyway) + let fixed_supply = metadata.fixed_supply.as_ref(); + let unlimited_supply = metadata.unlimited_supply.as_ref(); + if let Some(supply) = unlimited_supply { + (current_supply, max_supply, total_minted_v2) = ( + supply.current_supply.clone(), + None, + Some(supply.total_minted.clone()), + ); + } + if let Some(supply) = fixed_supply { + (current_supply, max_supply, total_minted_v2) = ( + supply.current_supply.clone(), + Some(supply.max_supply.clone()), + Some(supply.total_minted.clone()), + ); + } + + // Getting collection mutability config from AptosCollection + let collection = metadata.aptos_collection.as_ref(); + if let Some(collection) = collection { + mutable_description = Some(collection.mutable_description); + mutable_uri = Some(collection.mutable_uri); + } + } else { + // ObjectCore should not be missing, returning from entire function early + return Ok(None); + } + + let collection_id = resource.address.clone(); + let creator_address = inner.creator.clone(); + let collection_name = inner.get_name_trunc(); + let description = inner.description.clone(); + let uri = inner.get_uri_trunc(); + + Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + collection_id: collection_id.clone(), + creator_address: creator_address.clone(), + collection_name: collection_name.clone(), + description: description.clone(), + uri: uri.clone(), + current_supply: current_supply.clone(), + max_supply: max_supply.clone(), + total_minted_v2: total_minted_v2.clone(), + mutable_description, + mutable_uri, + table_handle_v1: None, + token_standard: TokenStandard::V2.to_string(), + transaction_timestamp: txn_timestamp, + }, + CurrentCollectionV2 { + collection_id, + creator_address, + collection_name, + description, + uri, + current_supply, + max_supply, + total_minted_v2, + mutable_description, + mutable_uri, + table_handle_v1: None, + token_standard: TokenStandard::V2.to_string(), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + }, + ))) + } else { + Ok(None) + } + } + + pub fn get_v1_from_write_table_item( + table_item: &APIWriteTableItem, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + table_handle_to_owner: &TableHandleToOwner, + conn: &mut PgPoolConnection, + ) -> anyhow::Result> { + let table_item_data = table_item.data.as_ref().unwrap(); + + let maybe_collection_data = match TokenWriteSet::from_table_item_type( + table_item_data.value_type.as_str(), + &table_item_data.value, + txn_version, + )? { + Some(TokenWriteSet::CollectionData(inner)) => Some(inner), + _ => None, + }; + if let Some(collection_data) = maybe_collection_data { + let table_handle = table_item.handle.to_string(); + let maybe_creator_address = table_handle_to_owner + .get(&standardize_address(&table_handle)) + .map(|table_metadata| table_metadata.owner_address.clone()); + let mut creator_address = match maybe_creator_address { + Some(ca) => ca, + None => { + Self::get_collection_creator_for_v1(conn, &table_handle).context(format!( + "Failed to get collection creator for table handle {}, txn version {}", + table_handle, txn_version + ))? 
+ }, + }; + creator_address = standardize_address(&creator_address); + let collection_id_struct = + CollectionDataIdType::new(creator_address, collection_data.get_name().to_string()); + let collection_id = collection_id_struct.to_id(); + let collection_name = collection_data.get_name_trunc(); + let uri = collection_data.get_uri_trunc(); + + Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + collection_id: collection_id.clone(), + creator_address: collection_id_struct.creator.clone(), + collection_name: collection_name.clone(), + description: collection_data.description.clone(), + uri: uri.clone(), + current_supply: collection_data.supply.clone(), + max_supply: Some(collection_data.maximum.clone()), + total_minted_v2: None, + mutable_uri: Some(collection_data.mutability_config.uri), + mutable_description: Some(collection_data.mutability_config.description), + table_handle_v1: Some(table_handle.clone()), + token_standard: TokenStandard::V1.to_string(), + transaction_timestamp: txn_timestamp, + }, + CurrentCollectionV2 { + collection_id, + creator_address: collection_id_struct.creator, + collection_name, + description: collection_data.description, + uri, + current_supply: collection_data.supply, + max_supply: Some(collection_data.maximum.clone()), + total_minted_v2: None, + mutable_uri: Some(collection_data.mutability_config.uri), + mutable_description: Some(collection_data.mutability_config.description), + table_handle_v1: Some(table_handle), + token_standard: TokenStandard::V1.to_string(), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + }, + ))) + } else { + Ok(None) + } + } + + /// If collection data is not in resources of the same transaction, then try looking for it in the database. Since collection owner + /// cannot change, we can just look in the current_collection_datas table. + /// Retrying a few times since this collection could've been written in a separate thread. + pub fn get_collection_creator_for_v1( + conn: &mut PgPoolConnection, + table_handle: &str, + ) -> anyhow::Result { + let mut retried = 0; + while retried < QUERY_RETRIES { + retried += 1; + match Self::get_by_table_handle(conn, table_handle) { + Ok(creator) => return Ok(creator), + Err(_) => { + std::thread::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS)); + }, + } + } + Err(anyhow::anyhow!("Failed to get collection creator")) + } + + /// TODO: Change this to a KV store + pub fn get_by_table_handle( + conn: &mut PgPoolConnection, + table_handle: &str, + ) -> anyhow::Result { + let mut res: Vec> = sql_query( + "SELECT creator_address FROM current_collections_v2 WHERE table_handle_v1 = $1", + ) + .bind::(table_handle) + .get_results(conn)?; + Ok(res + .pop() + .context("collection result empty")? + .context("collection result null")? 
+ .creator_address) + } +} diff --git a/crates/indexer/src/models/token_models/v2_token_datas.rs b/crates/indexer/src/models/token_models/v2_token_datas.rs new file mode 100644 index 0000000000000..1c85b60d7a84d --- /dev/null +++ b/crates/indexer/src/models/token_models/v2_token_datas.rs @@ -0,0 +1,226 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::{ + token_utils::TokenWriteSet, + v2_token_utils::{TokenStandard, TokenV2AggregatedDataMapping, V2TokenResource}, +}; +use crate::{ + models::move_resources::MoveResource, + schema::{current_token_datas_v2, token_datas_v2}, +}; +use aptos_api_types::{WriteResource as APIWriteResource, WriteTableItem as APIWriteTableItem}; +use bigdecimal::{BigDecimal, Zero}; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; + +// PK of current_token_datas_v2, i.e. token_data_id +pub type CurrentTokenDataV2PK = String; + +#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, write_set_change_index))] +#[diesel(table_name = token_datas_v2)] +pub struct TokenDataV2 { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub token_data_id: String, + pub collection_id: String, + pub token_name: String, + pub maximum: Option, + pub supply: BigDecimal, + pub largest_property_version_v1: Option, + pub token_uri: String, + pub token_properties: serde_json::Value, + pub description: String, + pub token_standard: String, + pub is_fungible_v2: Option, + pub transaction_timestamp: chrono::NaiveDateTime, +} + +#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(token_data_id))] +#[diesel(table_name = current_token_datas_v2)] +pub struct CurrentTokenDataV2 { + pub token_data_id: String, + pub collection_id: String, + pub token_name: String, + pub maximum: Option, + pub supply: BigDecimal, + pub largest_property_version_v1: Option, + pub token_uri: String, + pub token_properties: serde_json::Value, + pub description: String, + pub token_standard: String, + pub is_fungible_v2: Option, + pub last_transaction_version: i64, + pub last_transaction_timestamp: chrono::NaiveDateTime, +} + +impl TokenDataV2 { + pub fn get_v2_from_write_resource( + write_resource: &APIWriteResource, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + token_v2_metadata: &TokenV2AggregatedDataMapping, + ) -> anyhow::Result> { + let type_str = format!( + "{}::{}::{}", + write_resource.data.typ.address, + write_resource.data.typ.module, + write_resource.data.typ.name + ); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2TokenResource::Token(inner) = + V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? 
+ { + // Get maximum, supply, and is fungible from fungible asset if this is a fungible token + let (maximum, supply, is_fungible_v2) = (None, BigDecimal::zero(), Some(false)); + // Get token properties from 0x4::property_map::PropertyMap + let mut token_properties = serde_json::Value::Null; + if let Some(metadata) = token_v2_metadata.get(&resource.address) { + token_properties = metadata + .property_map + .as_ref() + .map(|m| m.inner.clone()) + .unwrap_or(token_properties); + } else { + // ObjectCore should not be missing, returning from entire function early + return Ok(None); + } + + let collection_id = inner.collection.inner.clone(); + let token_data_id = resource.address; + let token_name = inner.get_name_trunc(); + let token_uri = inner.get_uri_trunc(); + + Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + token_data_id: token_data_id.clone(), + collection_id: collection_id.clone(), + token_name: token_name.clone(), + maximum: maximum.clone(), + supply: supply.clone(), + largest_property_version_v1: None, + token_uri: token_uri.clone(), + token_properties: token_properties.clone(), + description: inner.description.clone(), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2, + transaction_timestamp: txn_timestamp, + }, + CurrentTokenDataV2 { + token_data_id, + collection_id, + token_name, + maximum, + supply, + largest_property_version_v1: None, + token_uri, + token_properties, + description: inner.description, + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2, + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + }, + ))) + } else { + Ok(None) + } + } + + pub fn get_v1_from_write_table_item( + table_item: &APIWriteTableItem, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + ) -> anyhow::Result> { + let table_item_data = table_item.data.as_ref().unwrap(); + + let maybe_token_data = match TokenWriteSet::from_table_item_type( + table_item_data.value_type.as_str(), + &table_item_data.value, + txn_version, + )? { + Some(TokenWriteSet::TokenData(inner)) => Some(inner), + _ => None, + }; + + if let Some(token_data) = maybe_token_data { + let maybe_token_data_id = match TokenWriteSet::from_table_item_type( + table_item_data.key_type.as_str(), + &table_item_data.key, + txn_version, + )? 
{ + Some(TokenWriteSet::TokenDataId(inner)) => Some(inner), + _ => None, + }; + if let Some(token_data_id_struct) = maybe_token_data_id { + let collection_id = token_data_id_struct.get_collection_id(); + let token_data_id = token_data_id_struct.to_id(); + let token_name = token_data_id_struct.get_name_trunc(); + let token_uri = token_data.get_uri_trunc(); + + return Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + token_data_id: token_data_id.clone(), + collection_id: collection_id.clone(), + token_name: token_name.clone(), + maximum: Some(token_data.maximum.clone()), + supply: token_data.supply.clone(), + largest_property_version_v1: Some( + token_data.largest_property_version.clone(), + ), + token_uri: token_uri.clone(), + token_properties: token_data.default_properties.clone(), + description: token_data.description.clone(), + token_standard: TokenStandard::V1.to_string(), + is_fungible_v2: None, + transaction_timestamp: txn_timestamp, + }, + CurrentTokenDataV2 { + token_data_id, + collection_id, + token_name, + maximum: Some(token_data.maximum), + supply: token_data.supply, + largest_property_version_v1: Some(token_data.largest_property_version), + token_uri, + token_properties: token_data.default_properties, + description: token_data.description, + token_standard: TokenStandard::V1.to_string(), + is_fungible_v2: None, + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + }, + ))); + } else { + aptos_logger::warn!( + transaction_version = txn_version, + key_type = table_item_data.key_type, + key = table_item_data.key, + "Expecting token_data_id as key for value = token_data" + ); + } + } + Ok(None) + } +} diff --git a/crates/indexer/src/models/token_models/v2_token_ownerships.rs b/crates/indexer/src/models/token_models/v2_token_ownerships.rs new file mode 100644 index 0000000000000..9b12bdb6f57ea --- /dev/null +++ b/crates/indexer/src/models/token_models/v2_token_ownerships.rs @@ -0,0 +1,462 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::{ + collection_datas::{QUERY_RETRIES, QUERY_RETRY_DELAY_MS}, + token_utils::TokenWriteSet, + tokens::TableHandleToOwner, + v2_token_datas::TokenDataV2, + v2_token_utils::{ObjectCore, TokenStandard, TokenV2AggregatedDataMapping, TokenV2Burned}, +}; +use crate::{ + database::PgPoolConnection, + schema::{current_token_ownerships_v2, token_ownerships_v2}, + util::{ensure_not_negative, standardize_address}, +}; +use anyhow::Context; +use aptos_api_types::{ + DeleteResource, DeleteTableItem as APIDeleteTableItem, WriteResource, + WriteTableItem as APIWriteTableItem, +}; +use bigdecimal::{BigDecimal, One, Zero}; +use diesel::{prelude::*, ExpressionMethods}; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +// PK of current_token_ownerships_v2, i.e. 
token_data_id, property_version_v1, owner_address, storage_id +pub type CurrentTokenOwnershipV2PK = (String, BigDecimal, String, String); + +#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, write_set_change_index))] +#[diesel(table_name = token_ownerships_v2)] +pub struct TokenOwnershipV2 { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub token_data_id: String, + pub property_version_v1: BigDecimal, + pub owner_address: Option, + pub storage_id: String, + pub amount: BigDecimal, + pub table_type_v1: Option, + pub token_properties_mutated_v1: Option, + pub is_soulbound_v2: Option, + pub token_standard: String, + pub is_fungible_v2: Option, + pub transaction_timestamp: chrono::NaiveDateTime, +} + +#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(token_data_id, property_version_v1, owner_address, storage_id))] +#[diesel(table_name = current_token_ownerships_v2)] +pub struct CurrentTokenOwnershipV2 { + pub token_data_id: String, + pub property_version_v1: BigDecimal, + pub owner_address: String, + pub storage_id: String, + pub amount: BigDecimal, + pub table_type_v1: Option, + pub token_properties_mutated_v1: Option, + pub is_soulbound_v2: Option, + pub token_standard: String, + pub is_fungible_v2: Option, + pub last_transaction_version: i64, + pub last_transaction_timestamp: chrono::NaiveDateTime, +} + +// Facilitate tracking when a token is burned +#[derive(Clone, Debug)] +pub struct NFTOwnershipV2 { + pub token_data_id: String, + pub owner_address: String, + pub is_soulbound: Option, +} + +/// Need a separate struct for queryable because we don't want to define the inserted_at column (letting DB fill) +#[derive(Debug, Identifiable, Queryable)] +#[diesel(primary_key(token_data_id, property_version_v1, owner_address, storage_id))] +#[diesel(table_name = current_token_ownerships_v2)] +pub struct CurrentTokenOwnershipV2Query { + pub token_data_id: String, + pub property_version_v1: BigDecimal, + pub owner_address: String, + pub storage_id: String, + pub amount: BigDecimal, + pub table_type_v1: Option, + pub token_properties_mutated_v1: Option, + pub is_soulbound_v2: Option, + pub token_standard: String, + pub is_fungible_v2: Option, + pub last_transaction_version: i64, + pub last_transaction_timestamp: chrono::NaiveDateTime, + pub inserted_at: chrono::NaiveDateTime, +} + +impl TokenOwnershipV2 { + /// For nfts it's the same resources that we parse tokendatas from so we leverage the work done in there to get ownership data + pub fn get_nft_v2_from_token_data( + token_data: &TokenDataV2, + token_v2_metadata: &TokenV2AggregatedDataMapping, + ) -> anyhow::Result<(Self, CurrentTokenOwnershipV2)> { + let metadata = token_v2_metadata + .get(&token_data.token_data_id) + .context("If token data exists objectcore must exist")?; + let object_core = metadata.object.clone(); + let token_data_id = token_data.token_data_id.clone(); + let owner_address = object_core.owner.clone(); + let storage_id = token_data_id.clone(); + let is_soulbound = !object_core.allow_ungated_transfer; + + Ok(( + Self { + transaction_version: token_data.transaction_version, + write_set_change_index: token_data.write_set_change_index, + token_data_id: token_data_id.clone(), + property_version_v1: BigDecimal::zero(), + owner_address: Some(owner_address.clone()), + storage_id: storage_id.clone(), + amount: BigDecimal::one(), + table_type_v1: None, + token_properties_mutated_v1: None, + 
is_soulbound_v2: Some(is_soulbound), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: token_data.is_fungible_v2, + transaction_timestamp: token_data.transaction_timestamp, + }, + CurrentTokenOwnershipV2 { + token_data_id, + property_version_v1: BigDecimal::zero(), + owner_address, + storage_id, + amount: BigDecimal::one(), + table_type_v1: None, + token_properties_mutated_v1: None, + is_soulbound_v2: Some(is_soulbound), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: token_data.is_fungible_v2, + last_transaction_version: token_data.transaction_version, + last_transaction_timestamp: token_data.transaction_timestamp, + }, + )) + } + + /// This handles the case where token is burned but objectCore is still there + pub fn get_burned_nft_v2_from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + tokens_burned: &TokenV2Burned, + ) -> anyhow::Result> { + if let Some(token_address) = + tokens_burned.get(&standardize_address(&write_resource.address.to_string())) + { + if let Some(object_core) = ObjectCore::from_write_resource(write_resource, txn_version)? + { + let token_data_id = token_address.clone(); + let owner_address = object_core.owner.clone(); + let storage_id = token_data_id.clone(); + let is_soulbound = !object_core.allow_ungated_transfer; + + return Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + token_data_id: token_data_id.clone(), + property_version_v1: BigDecimal::zero(), + owner_address: Some(owner_address.clone()), + storage_id: storage_id.clone(), + amount: BigDecimal::zero(), + table_type_v1: None, + token_properties_mutated_v1: None, + is_soulbound_v2: Some(is_soulbound), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: Some(false), + transaction_timestamp: txn_timestamp, + }, + CurrentTokenOwnershipV2 { + token_data_id, + property_version_v1: BigDecimal::zero(), + owner_address, + storage_id, + amount: BigDecimal::zero(), + table_type_v1: None, + token_properties_mutated_v1: None, + is_soulbound_v2: Some(is_soulbound), + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: Some(false), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + }, + ))); + } + } + Ok(None) + } + + /// This handles the case where token is burned and objectCore is deleted + pub fn get_burned_nft_v2_from_delete_resource( + write_resource: &DeleteResource, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + prior_nft_ownership: &HashMap, + tokens_burned: &TokenV2Burned, + conn: &mut PgPoolConnection, + ) -> anyhow::Result> { + if let Some(token_address) = + tokens_burned.get(&standardize_address(&write_resource.address.to_string())) + { + let latest_nft_ownership: NFTOwnershipV2 = match prior_nft_ownership.get(token_address) + { + Some(inner) => inner.clone(), + None => { + CurrentTokenOwnershipV2Query::get_nft_by_token_data_id(conn, token_address)? 
+ }, + }; + + let token_data_id = token_address.clone(); + let owner_address = latest_nft_ownership.owner_address.clone(); + let storage_id = token_data_id.clone(); + let is_soulbound = latest_nft_ownership.is_soulbound; + + return Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + token_data_id: token_data_id.clone(), + property_version_v1: BigDecimal::zero(), + owner_address: Some(owner_address.clone()), + storage_id: storage_id.clone(), + amount: BigDecimal::zero(), + table_type_v1: None, + token_properties_mutated_v1: None, + is_soulbound_v2: is_soulbound, + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: Some(false), + transaction_timestamp: txn_timestamp, + }, + CurrentTokenOwnershipV2 { + token_data_id, + property_version_v1: BigDecimal::zero(), + owner_address, + storage_id, + amount: BigDecimal::zero(), + table_type_v1: None, + token_properties_mutated_v1: None, + is_soulbound_v2: is_soulbound, + token_standard: TokenStandard::V2.to_string(), + is_fungible_v2: Some(false), + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + }, + ))); + } + Ok(None) + } + + /// We want to track tokens in any offer/claims and tokenstore + pub fn get_v1_from_write_table_item( + table_item: &APIWriteTableItem, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + table_handle_to_owner: &TableHandleToOwner, + ) -> anyhow::Result)>> { + let table_item_data = table_item.data.as_ref().unwrap(); + + let maybe_token = match TokenWriteSet::from_table_item_type( + table_item_data.value_type.as_str(), + &table_item_data.value, + txn_version, + )? { + Some(TokenWriteSet::Token(inner)) => Some(inner), + _ => None, + }; + + if let Some(token) = maybe_token { + let table_handle = standardize_address(&table_item.handle.to_string()); + let amount = ensure_not_negative(token.amount); + let token_id_struct = token.id; + let token_data_id_struct = token_id_struct.token_data_id; + let token_data_id = token_data_id_struct.to_id(); + + let maybe_table_metadata = table_handle_to_owner.get(&table_handle); + let (curr_token_ownership, owner_address, table_type) = match maybe_table_metadata { + Some(tm) => { + let owner_address = standardize_address(&tm.owner_address); + ( + Some(CurrentTokenOwnershipV2 { + token_data_id: token_data_id.clone(), + property_version_v1: token_id_struct.property_version.clone(), + owner_address: owner_address.clone(), + storage_id: table_handle.clone(), + amount: amount.clone(), + table_type_v1: Some(tm.table_type.clone()), + token_properties_mutated_v1: Some(token.token_properties.clone()), + is_soulbound_v2: None, + token_standard: TokenStandard::V1.to_string(), + is_fungible_v2: None, + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + }), + Some(owner_address), + Some(tm.table_type.clone()), + ) + }, + None => { + aptos_logger::warn!( + transaction_version = txn_version, + table_handle = table_handle, + "Missing table handle metadata for TokenStore. 
{:?}", + table_handle_to_owner + ); + (None, None, None) + }, + }; + + Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + token_data_id, + property_version_v1: token_id_struct.property_version, + owner_address, + storage_id: table_handle, + amount, + table_type_v1: table_type, + token_properties_mutated_v1: Some(token.token_properties), + is_soulbound_v2: None, + token_standard: TokenStandard::V1.to_string(), + is_fungible_v2: None, + transaction_timestamp: txn_timestamp, + }, + curr_token_ownership, + ))) + } else { + Ok(None) + } + } + + /// We want to track tokens in any offer/claims and tokenstore + pub fn get_v1_from_delete_table_item( + table_item: &APIDeleteTableItem, + txn_version: i64, + write_set_change_index: i64, + txn_timestamp: chrono::NaiveDateTime, + table_handle_to_owner: &TableHandleToOwner, + ) -> anyhow::Result)>> { + let table_item_data = table_item.data.as_ref().unwrap(); + + let maybe_token_id = match TokenWriteSet::from_table_item_type( + table_item_data.key_type.as_str(), + &table_item_data.key, + txn_version, + )? { + Some(TokenWriteSet::TokenId(inner)) => Some(inner), + _ => None, + }; + + if let Some(token_id_struct) = maybe_token_id { + let table_handle = standardize_address(&table_item.handle.to_string()); + let token_data_id_struct = token_id_struct.token_data_id; + let token_data_id = token_data_id_struct.to_id(); + + let maybe_table_metadata = table_handle_to_owner.get(&table_handle); + let (curr_token_ownership, owner_address, table_type) = match maybe_table_metadata { + Some(tm) => { + let owner_address = standardize_address(&tm.owner_address); + ( + Some(CurrentTokenOwnershipV2 { + token_data_id: token_data_id.clone(), + property_version_v1: token_id_struct.property_version.clone(), + owner_address: owner_address.clone(), + storage_id: table_handle.clone(), + amount: BigDecimal::zero(), + table_type_v1: Some(tm.table_type.clone()), + token_properties_mutated_v1: None, + is_soulbound_v2: None, + token_standard: TokenStandard::V1.to_string(), + is_fungible_v2: None, + last_transaction_version: txn_version, + last_transaction_timestamp: txn_timestamp, + }), + Some(owner_address), + Some(tm.table_type.clone()), + ) + }, + None => { + aptos_logger::warn!( + transaction_version = txn_version, + table_handle = table_handle, + "Missing table handle metadata for TokenStore. 
{:?}", + table_handle_to_owner + ); + (None, None, None) + }, + }; + + Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + token_data_id, + property_version_v1: token_id_struct.property_version, + owner_address, + storage_id: table_handle, + amount: BigDecimal::zero(), + table_type_v1: table_type, + token_properties_mutated_v1: None, + is_soulbound_v2: None, + token_standard: TokenStandard::V1.to_string(), + is_fungible_v2: None, + transaction_timestamp: txn_timestamp, + }, + curr_token_ownership, + ))) + } else { + Ok(None) + } + } +} + +impl CurrentTokenOwnershipV2Query { + pub fn get_nft_by_token_data_id( + conn: &mut PgPoolConnection, + token_data_id: &str, + ) -> anyhow::Result { + let mut retried = 0; + while retried < QUERY_RETRIES { + retried += 1; + match Self::get_nft_by_token_data_id_impl(conn, token_data_id) { + Ok(inner) => { + return Ok(NFTOwnershipV2 { + token_data_id: inner.token_data_id.clone(), + owner_address: inner.owner_address.clone(), + is_soulbound: inner.is_soulbound_v2, + }) + }, + Err(_) => { + std::thread::sleep(std::time::Duration::from_millis(QUERY_RETRY_DELAY_MS)); + }, + } + } + Err(anyhow::anyhow!( + "Failed to get nft by token data id: {}", + token_data_id + )) + } + + fn get_nft_by_token_data_id_impl( + conn: &mut PgPoolConnection, + token_data_id: &str, + ) -> diesel::QueryResult { + current_token_ownerships_v2::table + .filter(current_token_ownerships_v2::token_data_id.eq(token_data_id)) + .first::(conn) + } +} diff --git a/crates/indexer/src/models/token_models/v2_token_utils.rs b/crates/indexer/src/models/token_models/v2_token_utils.rs new file mode 100644 index 0000000000000..40f9df854cf32 --- /dev/null +++ b/crates/indexer/src/models/token_models/v2_token_utils.rs @@ -0,0 +1,396 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] + +use super::token_utils::{NAME_LENGTH, URI_LENGTH}; +use crate::{ + models::{move_resources::MoveResource, v2_objects::CurrentObjectPK}, + util::{ + deserialize_token_object_property_map_from_bcs_hexstring, standardize_address, truncate_str, + }, +}; +use anyhow::{Context, Result}; +use aptos_api_types::{deserialize_from_string, Event, WriteResource}; +use bigdecimal::BigDecimal; +use serde::{Deserialize, Serialize}; +use std::{ + collections::{HashMap, HashSet}, + fmt::{self, Formatter}, +}; + +/// Tracks all token related data in a hashmap for quick access (keyed on address of the object core) +pub type TokenV2AggregatedDataMapping = HashMap; +/// Tracks all token related data in a hashmap for quick access (keyed on address of the object core) +pub type TokenV2Burned = HashSet; + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct TokenV2AggregatedData { + pub aptos_collection: Option, + pub fixed_supply: Option, + pub object: ObjectCore, + pub unlimited_supply: Option, + pub property_map: Option, +} + +/// Tracks which token standard a token / collection is built upon +#[derive(Serialize)] +pub enum TokenStandard { + V1, + V2, +} + +impl fmt::Display for TokenStandard { + fn fmt(&self, f: &mut Formatter) -> std::fmt::Result { + let res = match self { + TokenStandard::V1 => "v1", + TokenStandard::V2 => "v2", + }; + write!(f, "{}", res) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ObjectCore { + pub allow_ungated_transfer: bool, + #[serde(deserialize_with = "deserialize_from_string")] + pub guid_creation_num: BigDecimal, + pub 
owner: String, +} + +impl ObjectCore { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = format!( + "{}::{}::{}", + write_resource.data.typ.address, + write_resource.data.typ.module, + write_resource.data.typ.name + ); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + if let V2TokenResource::ObjectCore(inner) = V2TokenResource::from_resource( + &type_str, + &serde_json::to_value(&write_resource.data.data).unwrap(), + txn_version, + )? { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct Collection { + pub creator: String, + pub description: String, + // These are set to private because we should never get name or uri directly + name: String, + uri: String, +} + +impl Collection { + pub fn get_uri_trunc(&self) -> String { + truncate_str(&self.uri, URI_LENGTH) + } + + pub fn get_name_trunc(&self) -> String { + truncate_str(&self.name, NAME_LENGTH) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct AptosCollection { + pub mutable_description: bool, + pub mutable_uri: bool, +} + +impl AptosCollection { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = format!( + "{}::{}::{}", + write_resource.data.typ.address, + write_resource.data.typ.module, + write_resource.data.typ.name + ); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2TokenResource::AptosCollection(inner) = + V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? + { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct Token { + pub collection: ResourceReference, + pub description: String, + // These are set to private because we should never get name or uri directly + name: String, + uri: String, +} + +impl Token { + pub fn get_uri_trunc(&self) -> String { + truncate_str(&self.uri, URI_LENGTH) + } + + pub fn get_name_trunc(&self) -> String { + truncate_str(&self.name, NAME_LENGTH) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct ResourceReference { + pub inner: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct FixedSupply { + #[serde(deserialize_with = "deserialize_from_string")] + pub current_supply: BigDecimal, + #[serde(deserialize_with = "deserialize_from_string")] + pub max_supply: BigDecimal, + #[serde(deserialize_with = "deserialize_from_string")] + pub total_minted: BigDecimal, +} + +impl FixedSupply { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = format!( + "{}::{}::{}", + write_resource.data.typ.address, + write_resource.data.typ.module, + write_resource.data.typ.name + ); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2TokenResource::FixedSupply(inner) = + V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? 
+ { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct UnlimitedSupply { + #[serde(deserialize_with = "deserialize_from_string")] + pub current_supply: BigDecimal, + #[serde(deserialize_with = "deserialize_from_string")] + pub total_minted: BigDecimal, +} + +impl UnlimitedSupply { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = format!( + "{}::{}::{}", + write_resource.data.typ.address, + write_resource.data.typ.module, + write_resource.data.typ.name + ); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2TokenResource::UnlimitedSupply(inner) = + V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? + { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct BurnEvent { + #[serde(deserialize_with = "deserialize_from_string")] + pub index: BigDecimal, + token: String, +} + +impl BurnEvent { + pub fn from_event(event: &Event, txn_version: i64) -> anyhow::Result> { + let event_type = event.typ.to_string(); + if let Some(V2TokenEvent::BurnEvent(inner)) = + V2TokenEvent::from_event(event_type.as_str(), &event.data, txn_version).unwrap() + { + Ok(Some(inner)) + } else { + Ok(None) + } + } + + pub fn get_token_address(&self) -> String { + standardize_address(&self.token) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub struct PropertyMap { + #[serde(deserialize_with = "deserialize_token_object_property_map_from_bcs_hexstring")] + pub inner: serde_json::Value, +} + +impl PropertyMap { + pub fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + ) -> anyhow::Result> { + let type_str = format!( + "{}::{}::{}", + write_resource.data.typ.address, + write_resource.data.typ.module, + write_resource.data.typ.name + ); + if !V2TokenResource::is_resource_supported(type_str.as_str()) { + return Ok(None); + } + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + + if let V2TokenResource::PropertyMap(inner) = + V2TokenResource::from_resource(&type_str, resource.data.as_ref().unwrap(), txn_version)? 
+ { + Ok(Some(inner)) + } else { + Ok(None) + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum V2TokenResource { + AptosCollection(AptosCollection), + Collection(Collection), + FixedSupply(FixedSupply), + ObjectCore(ObjectCore), + UnlimitedSupply(UnlimitedSupply), + Token(Token), + PropertyMap(PropertyMap), +} + +impl V2TokenResource { + pub fn is_resource_supported(data_type: &str) -> bool { + matches!( + data_type, + "0x1::object::ObjectCore" + | "0x4::collection::Collection" + | "0x4::collection::FixedSupply" + | "0x4::collection::UnlimitedSupply" + | "0x4::aptos_token::AptosCollection" + | "0x4::token::Token" + | "0x4::property_map::PropertyMap" + ) + } + + pub fn from_resource( + data_type: &str, + data: &serde_json::Value, + txn_version: i64, + ) -> Result { + match data_type { + "0x1::object::ObjectCore" => { + serde_json::from_value(data.clone()).map(|inner| Some(Self::ObjectCore(inner))) + }, + "0x4::collection::Collection" => { + serde_json::from_value(data.clone()).map(|inner| Some(Self::Collection(inner))) + }, + "0x4::collection::FixedSupply" => { + serde_json::from_value(data.clone()).map(|inner| Some(Self::FixedSupply(inner))) + }, + "0x4::collection::UnlimitedSupply" => { + serde_json::from_value(data.clone()).map(|inner| Some(Self::UnlimitedSupply(inner))) + }, + "0x4::aptos_token::AptosCollection" => { + serde_json::from_value(data.clone()).map(|inner| Some(Self::AptosCollection(inner))) + }, + "0x4::token::Token" => { + serde_json::from_value(data.clone()).map(|inner| Some(Self::Token(inner))) + }, + "0x4::property_map::PropertyMap" => { + serde_json::from_value(data.clone()).map(|inner| Some(Self::PropertyMap(inner))) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! failed to parse type {}, data {:?}", + txn_version, data_type, data + ))? + .context(format!( + "Resource unsupported! Call is_resource_supported first. version {} type {}", + txn_version, data_type + )) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone)] +pub enum V2TokenEvent { + BurnEvent(BurnEvent), +} + +impl V2TokenEvent { + pub fn from_event( + data_type: &str, + data: &serde_json::Value, + txn_version: i64, + ) -> Result> { + match data_type { + "0x4::collection::BurnEvent" => { + serde_json::from_value(data.clone()).map(|inner| Some(Self::BurnEvent(inner))) + }, + _ => Ok(None), + } + .context(format!( + "version {} failed! failed to parse type {}, data {:?}", + txn_version, data_type, data + )) + } +} diff --git a/crates/indexer/src/models/v2_objects.rs b/crates/indexer/src/models/v2_objects.rs new file mode 100644 index 0000000000000..bd7a8f6d75702 --- /dev/null +++ b/crates/indexer/src/models/v2_objects.rs @@ -0,0 +1,160 @@ +// Copyright © Aptos Foundation +// SPDX-License-Identifier: Apache-2.0 + +// This is required because a diesel macro makes clippy sad +#![allow(clippy::extra_unused_lifetimes)] +#![allow(clippy::unused_unit)] + +use super::token_models::v2_token_utils::ObjectCore; +use crate::{ + models::move_resources::MoveResource, + schema::{current_objects, objects}, + util::standardize_address, +}; +use aptos_api_types::{DeleteResource, Transaction, WriteResource, WriteSetChange}; +use bigdecimal::BigDecimal; +use field_count::FieldCount; +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +// PK of current_objects, i.e. 
object_address +pub type CurrentObjectPK = String; + +#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(transaction_version, write_set_change_index))] +#[diesel(table_name = objects)] +pub struct Object { + pub transaction_version: i64, + pub write_set_change_index: i64, + pub object_address: String, + pub owner_address: Option, + pub state_key_hash: String, + pub guid_creation_num: Option, + pub allow_ungated_transfer: Option, + pub is_deleted: bool, +} + +#[derive(Debug, Deserialize, FieldCount, Identifiable, Insertable, Serialize)] +#[diesel(primary_key(object_address))] +#[diesel(table_name = current_objects)] +pub struct CurrentObject { + pub object_address: String, + pub owner_address: Option, + pub state_key_hash: String, + pub allow_ungated_transfer: Option, + pub last_guid_creation_num: Option, + pub last_transaction_version: i64, + pub is_deleted: bool, +} + +impl Object { + /// Only parsing 0x1 ObjectCore from transactions + pub fn from_transaction( + transaction: &Transaction, + ) -> (Vec, HashMap) { + if let Transaction::UserTransaction(user_txn) = transaction { + let mut objects = vec![]; + let mut current_objects: HashMap = HashMap::new(); + let txn_version = user_txn.info.version.0 as i64; + + for (index, wsc) in user_txn.info.changes.iter().enumerate() { + let index = index as i64; + let maybe_object_combo = match wsc { + WriteSetChange::DeleteResource(inner) => { + Self::from_delete_resource(inner, txn_version, index).unwrap() + }, + WriteSetChange::WriteResource(inner) => { + Self::from_write_resource(inner, txn_version, index).unwrap() + }, + _ => None, + }; + if let Some((object, current_object)) = maybe_object_combo { + objects.push(object); + current_objects.insert(current_object.object_address.clone(), current_object); + } + } + (objects, current_objects) + } else { + Default::default() + } + } + + fn from_write_resource( + write_resource: &WriteResource, + txn_version: i64, + write_set_change_index: i64, + ) -> anyhow::Result> { + if let Some(inner) = ObjectCore::from_write_resource(write_resource, txn_version)? { + let resource = MoveResource::from_write_resource( + write_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + object_address: resource.address.clone(), + owner_address: Some(standardize_address(inner.owner.as_str())), + state_key_hash: resource.state_key_hash.clone(), + guid_creation_num: Some(inner.guid_creation_num.clone()), + allow_ungated_transfer: Some(inner.allow_ungated_transfer), + is_deleted: false, + }, + CurrentObject { + object_address: resource.address, + owner_address: Some(standardize_address(inner.owner.as_str())), + state_key_hash: resource.state_key_hash, + allow_ungated_transfer: Some(inner.allow_ungated_transfer), + last_guid_creation_num: Some(inner.guid_creation_num), + last_transaction_version: txn_version, + is_deleted: false, + }, + ))) + } else { + Ok(None) + } + } + + /// This should never really happen since it's very difficult to delete the entire resource group + /// currently. We actually need a better way of detecting whether an object is deleted since there + /// is likely no delete resource write set change. 
+ fn from_delete_resource( + delete_resource: &DeleteResource, + txn_version: i64, + write_set_change_index: i64, + ) -> anyhow::Result> { + if delete_resource.resource.to_string() == "0x1::object::ObjectCore" { + let resource = MoveResource::from_delete_resource( + delete_resource, + 0, // Placeholder, this isn't used anyway + txn_version, + 0, // Placeholder, this isn't used anyway + ); + Ok(Some(( + Self { + transaction_version: txn_version, + write_set_change_index, + object_address: resource.address.clone(), + owner_address: None, + state_key_hash: resource.state_key_hash.clone(), + guid_creation_num: None, + allow_ungated_transfer: None, + is_deleted: true, + }, + CurrentObject { + object_address: resource.address, + owner_address: None, + state_key_hash: resource.state_key_hash, + allow_ungated_transfer: None, + last_guid_creation_num: None, + last_transaction_version: txn_version, + is_deleted: true, + }, + ))) + } else { + Ok(None) + } + } +} diff --git a/crates/indexer/src/processors/default_processor.rs b/crates/indexer/src/processors/default_processor.rs index 3205766d46faf..dd9294e4ad699 100644 --- a/crates/indexer/src/processors/default_processor.rs +++ b/crates/indexer/src/processors/default_processor.rs @@ -18,6 +18,7 @@ use crate::{ signatures::Signature, transactions::{TransactionDetail, TransactionModel}, user_transactions::UserTransactionModel, + v2_objects::{CurrentObject, Object}, write_set_changes::{WriteSetChangeDetail, WriteSetChangeModel}, }, schema, @@ -67,10 +68,12 @@ fn insert_to_db_impl( &[CurrentTableItem], &[TableMetadata], ), + object_core: (&[Object], &[CurrentObject]), ) -> Result<(), diesel::result::Error> { let (user_transactions, signatures, block_metadata_transactions) = txn_details; let (move_modules, move_resources, table_items, current_table_items, table_metadata) = wsc_details; + let (objects, current_objects) = object_core; insert_transactions(conn, txns)?; insert_user_transactions(conn, user_transactions)?; insert_signatures(conn, signatures)?; @@ -82,6 +85,8 @@ fn insert_to_db_impl( insert_table_items(conn, table_items)?; insert_current_table_items(conn, current_table_items)?; insert_table_metadata(conn, table_metadata)?; + insert_objects(conn, objects)?; + insert_current_objects(conn, current_objects)?; Ok(()) } @@ -105,6 +110,7 @@ fn insert_to_db( Vec, Vec, ), + object_core: (Vec, Vec), ) -> Result<(), diesel::result::Error> { aptos_logger::trace!( name = name, @@ -115,6 +121,7 @@ fn insert_to_db( let (user_transactions, signatures, block_metadata_transactions) = txn_details; let (move_modules, move_resources, table_items, current_table_items, table_metadata) = wsc_details; + let (objects, current_objects) = object_core; match conn .build_transaction() .read_write() @@ -136,6 +143,7 @@ fn insert_to_db( ¤t_table_items, &table_metadata, ), + (&objects, ¤t_objects), ) }) { Ok(_) => Ok(()), @@ -151,6 +159,8 @@ fn insert_to_db( let table_items = clean_data_for_db(table_items, true); let current_table_items = clean_data_for_db(current_table_items, true); let table_metadata = clean_data_for_db(table_metadata, true); + let objects = clean_data_for_db(objects, true); + let current_objects = clean_data_for_db(current_objects, true); conn.build_transaction() .read_write() @@ -172,6 +182,7 @@ fn insert_to_db( ¤t_table_items, &table_metadata, ), + (&objects, ¤t_objects), ) }) }, @@ -335,7 +346,11 @@ fn insert_move_resources( diesel::insert_into(schema::move_resources::table) .values(&items_to_insert[start_ind..end_ind]) .on_conflict((transaction_version, 
write_set_change_index)) - .do_nothing(), + .do_update() + .set(( + inserted_at.eq(excluded(inserted_at)), + state_key_hash.eq(excluded(state_key_hash)), + )), None, )?; } @@ -407,6 +422,53 @@ fn insert_table_metadata( Ok(()) } +fn insert_objects( + conn: &mut PgConnection, + items_to_insert: &[Object], +) -> Result<(), diesel::result::Error> { + use schema::objects::dsl::*; + let chunks = get_chunks(items_to_insert.len(), Object::field_count()); + for (start_ind, end_ind) in chunks { + execute_with_better_error( + conn, + diesel::insert_into(schema::objects::table) + .values(&items_to_insert[start_ind..end_ind]) + .on_conflict((transaction_version, write_set_change_index)) + .do_nothing(), + None, + )?; + } + Ok(()) +} + +fn insert_current_objects( + conn: &mut PgConnection, + items_to_insert: &[CurrentObject], +) -> Result<(), diesel::result::Error> { + use schema::current_objects::dsl::*; + let chunks = get_chunks(items_to_insert.len(), CurrentObject::field_count()); + for (start_ind, end_ind) in chunks { + execute_with_better_error( + conn, + diesel::insert_into(schema::current_objects::table) + .values(&items_to_insert[start_ind..end_ind]) + .on_conflict(object_address) + .do_update() + .set(( + owner_address.eq(excluded(owner_address)), + state_key_hash.eq(excluded(state_key_hash)), + allow_ungated_transfer.eq(excluded(allow_ungated_transfer)), + last_guid_creation_num.eq(excluded(last_guid_creation_num)), + last_transaction_version.eq(excluded(last_transaction_version)), + is_deleted.eq(excluded(is_deleted)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE current_objects.last_transaction_version <= excluded.last_transaction_version "), + )?; + } + Ok(()) +} + #[async_trait] impl TransactionProcessor for DefaultTransactionProcessor { fn name(&self) -> &'static str { @@ -460,15 +522,29 @@ impl TransactionProcessor for DefaultTransactionProcessor { }, } } + + // TODO, merge this loop with above + let mut all_objects = vec![]; + let mut all_current_objects = HashMap::new(); + for txn in &transactions { + let (mut objects, current_objects) = Object::from_transaction(txn); + all_objects.append(&mut objects); + all_current_objects.extend(current_objects); + } // Getting list of values and sorting by pk in order to avoid postgres deadlock since we're doing multi threaded db writes let mut current_table_items = current_table_items .into_values() .collect::>(); let mut table_metadata = table_metadata.into_values().collect::>(); + let mut all_current_objects = all_current_objects + .into_values() + .collect::>(); + // Sort by PK current_table_items .sort_by(|a, b| (&a.table_handle, &a.key_hash).cmp(&(&b.table_handle, &b.key_hash))); table_metadata.sort_by(|a, b| a.handle.cmp(&b.handle)); + all_current_objects.sort_by(|a, b| a.object_address.cmp(&b.object_address)); let mut conn = self.get_conn(); let tx_result = insert_to_db( @@ -487,6 +563,7 @@ impl TransactionProcessor for DefaultTransactionProcessor { current_table_items, table_metadata, ), + (all_objects, all_current_objects), ); match tx_result { Ok(_) => Ok(ProcessingResult::new( diff --git a/crates/indexer/src/processors/token_processor.rs b/crates/indexer/src/processors/token_processor.rs index 0ebf83ec01dc5..53c9b6da7fbff 100644 --- a/crates/indexer/src/processors/token_processor.rs +++ b/crates/indexer/src/processors/token_processor.rs @@ -18,17 +18,30 @@ use crate::{ token_datas::{CurrentTokenData, TokenData}, token_ownerships::{CurrentTokenOwnership, TokenOwnership}, tokens::{ - CurrentTokenOwnershipPK, 
CurrentTokenPendingClaimPK, TableMetadataForToken, Token, - TokenDataIdHash, + CurrentTokenOwnershipPK, CurrentTokenPendingClaimPK, TableHandleToOwner, + TableMetadataForToken, Token, TokenDataIdHash, + }, + v2_collections::{CollectionV2, CurrentCollectionV2, CurrentCollectionV2PK}, + v2_token_datas::{CurrentTokenDataV2, CurrentTokenDataV2PK, TokenDataV2}, + v2_token_ownerships::{ + CurrentTokenOwnershipV2, CurrentTokenOwnershipV2PK, NFTOwnershipV2, TokenOwnershipV2, + }, + v2_token_utils::{ + AptosCollection, BurnEvent, FixedSupply, ObjectCore, PropertyMap, + TokenV2AggregatedData, TokenV2AggregatedDataMapping, TokenV2Burned, UnlimitedSupply, }, }, schema, + util::{parse_timestamp, standardize_address}, }; -use aptos_api_types::Transaction; +use aptos_api_types::{Transaction, WriteSetChange}; use async_trait::async_trait; use diesel::{pg::upsert::excluded, result::Error, ExpressionMethods, PgConnection}; use field_count::FieldCount; -use std::{collections::HashMap, fmt::Debug}; +use std::{ + collections::{HashMap, HashSet}, + fmt::Debug, +}; pub const NAME: &str = "token_processor"; pub struct TokenTransactionProcessor { @@ -78,6 +91,21 @@ fn insert_to_db_impl( current_token_claims: &[CurrentTokenPendingClaim], current_ans_lookups: &[CurrentAnsLookup], nft_points: &[NftPoints], + ( + collections_v2, + token_datas_v2, + token_ownerships_v2, + current_collections_v2, + current_token_datas_v2, + current_token_ownerships_v2, + ): ( + &[CollectionV2], + &[TokenDataV2], + &[TokenOwnershipV2], + &[CurrentCollectionV2], + &[CurrentTokenDataV2], + &[CurrentTokenOwnershipV2], + ), ) -> Result<(), diesel::result::Error> { let (tokens, token_ownerships, token_datas, collection_datas) = basic_token_transaction_lists; let (current_token_ownerships, current_token_datas, current_collection_datas) = @@ -93,6 +121,12 @@ fn insert_to_db_impl( insert_current_token_claims(conn, current_token_claims)?; insert_current_ans_lookups(conn, current_ans_lookups)?; insert_nft_points(conn, nft_points)?; + insert_collections_v2(conn, collections_v2)?; + insert_token_datas_v2(conn, token_datas_v2)?; + insert_token_ownerships_v2(conn, token_ownerships_v2)?; + insert_current_collections_v2(conn, current_collections_v2)?; + insert_current_token_datas_v2(conn, current_token_datas_v2)?; + insert_current_token_ownerships_v2(conn, current_token_ownerships_v2)?; Ok(()) } @@ -116,6 +150,21 @@ fn insert_to_db( current_token_claims: Vec, current_ans_lookups: Vec, nft_points: Vec, + ( + collections_v2, + token_datas_v2, + token_ownerships_v2, + current_collections_v2, + current_token_datas_v2, + current_token_ownerships_v2, + ): ( + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, + ), ) -> Result<(), diesel::result::Error> { aptos_logger::trace!( name = name, @@ -142,6 +191,14 @@ fn insert_to_db( ¤t_token_claims, ¤t_ans_lookups, &nft_points, + ( + &collections_v2, + &token_datas_v2, + &token_ownerships_v2, + ¤t_collections_v2, + ¤t_token_datas_v2, + ¤t_token_ownerships_v2, + ), ) }) { Ok(_) => Ok(()), @@ -160,6 +217,13 @@ fn insert_to_db( let current_token_claims = clean_data_for_db(current_token_claims, true); let current_ans_lookups = clean_data_for_db(current_ans_lookups, true); let nft_points = clean_data_for_db(nft_points, true); + let collections_v2 = clean_data_for_db(collections_v2, true); + let token_datas_v2 = clean_data_for_db(token_datas_v2, true); + let token_ownerships_v2 = clean_data_for_db(token_ownerships_v2, true); + let current_collections_v2 = clean_data_for_db(current_collections_v2, true); + let current_token_datas_v2 
= clean_data_for_db(current_token_datas_v2, true); + let current_token_ownerships_v2 = + clean_data_for_db(current_token_ownerships_v2, true); insert_to_db_impl( pg_conn, @@ -173,6 +237,14 @@ fn insert_to_db( ¤t_token_claims, ¤t_ans_lookups, &nft_points, + ( + &collections_v2, + &token_datas_v2, + &token_ownerships_v2, + ¤t_collections_v2, + ¤t_token_datas_v2, + ¤t_token_ownerships_v2, + ), ) }), } @@ -445,6 +517,8 @@ fn insert_current_token_claims( table_handle.eq(excluded(table_handle)), last_transaction_version.eq(excluded(last_transaction_version)), inserted_at.eq(excluded(inserted_at)), + token_data_id.eq(excluded(token_data_id)), + collection_id.eq(excluded(collection_id)), )), Some(" WHERE current_token_pending_claims.last_transaction_version <= excluded.last_transaction_version "), )?; @@ -501,6 +575,182 @@ fn insert_nft_points( Ok(()) } +fn insert_collections_v2( + conn: &mut PgConnection, + items_to_insert: &[CollectionV2], +) -> Result<(), diesel::result::Error> { + use schema::collections_v2::dsl::*; + + let chunks = get_chunks(items_to_insert.len(), CollectionV2::field_count()); + + for (start_ind, end_ind) in chunks { + execute_with_better_error( + conn, + diesel::insert_into(schema::collections_v2::table) + .values(&items_to_insert[start_ind..end_ind]) + .on_conflict((transaction_version, write_set_change_index)) + .do_nothing(), + None, + )?; + } + Ok(()) +} + +fn insert_token_datas_v2( + conn: &mut PgConnection, + items_to_insert: &[TokenDataV2], +) -> Result<(), diesel::result::Error> { + use schema::token_datas_v2::dsl::*; + + let chunks = get_chunks(items_to_insert.len(), TokenDataV2::field_count()); + + for (start_ind, end_ind) in chunks { + execute_with_better_error( + conn, + diesel::insert_into(schema::token_datas_v2::table) + .values(&items_to_insert[start_ind..end_ind]) + .on_conflict((transaction_version, write_set_change_index)) + .do_update() + .set(( + maximum.eq(excluded(maximum)), + supply.eq(excluded(supply)), + token_properties.eq(excluded(token_properties)), + inserted_at.eq(excluded(inserted_at)), + )), + None, + )?; + } + Ok(()) +} + +fn insert_token_ownerships_v2( + conn: &mut PgConnection, + items_to_insert: &[TokenOwnershipV2], +) -> Result<(), diesel::result::Error> { + use schema::token_ownerships_v2::dsl::*; + + let chunks = get_chunks(items_to_insert.len(), TokenOwnershipV2::field_count()); + + for (start_ind, end_ind) in chunks { + execute_with_better_error( + conn, + diesel::insert_into(schema::token_ownerships_v2::table) + .values(&items_to_insert[start_ind..end_ind]) + .on_conflict((transaction_version, write_set_change_index)) + .do_nothing(), + None, + )?; + } + Ok(()) +} + +fn insert_current_collections_v2( + conn: &mut PgConnection, + items_to_insert: &[CurrentCollectionV2], +) -> Result<(), diesel::result::Error> { + use schema::current_collections_v2::dsl::*; + + let chunks = get_chunks(items_to_insert.len(), CurrentCollectionV2::field_count()); + + for (start_ind, end_ind) in chunks { + execute_with_better_error( + conn, + diesel::insert_into(schema::current_collections_v2::table) + .values(&items_to_insert[start_ind..end_ind]) + .on_conflict(collection_id) + .do_update() + .set(( + creator_address.eq(excluded(creator_address)), + collection_name.eq(excluded(collection_name)), + description.eq(excluded(description)), + uri.eq(excluded(uri)), + current_supply.eq(excluded(current_supply)), + max_supply.eq(excluded(max_supply)), + total_minted_v2.eq(excluded(total_minted_v2)), + mutable_description.eq(excluded(mutable_description)), + 
mutable_uri.eq(excluded(mutable_uri)), + table_handle_v1.eq(excluded(table_handle_v1)), + token_standard.eq(excluded(token_standard)), + last_transaction_version.eq(excluded(last_transaction_version)), + last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE current_collections_v2.last_transaction_version <= excluded.last_transaction_version "), + )?; + } + Ok(()) +} + +fn insert_current_token_datas_v2( + conn: &mut PgConnection, + items_to_insert: &[CurrentTokenDataV2], +) -> Result<(), diesel::result::Error> { + use schema::current_token_datas_v2::dsl::*; + + let chunks = get_chunks(items_to_insert.len(), CurrentTokenDataV2::field_count()); + + for (start_ind, end_ind) in chunks { + execute_with_better_error( + conn, + diesel::insert_into(schema::current_token_datas_v2::table) + .values(&items_to_insert[start_ind..end_ind]) + .on_conflict(token_data_id) + .do_update() + .set(( + collection_id.eq(excluded(collection_id)), + token_name.eq(excluded(token_name)), + maximum.eq(excluded(maximum)), + supply.eq(excluded(supply)), + largest_property_version_v1.eq(excluded(largest_property_version_v1)), + token_uri.eq(excluded(token_uri)), + description.eq(excluded(description)), + token_properties.eq(excluded(token_properties)), + token_standard.eq(excluded(token_standard)), + is_fungible_v2.eq(excluded(is_fungible_v2)), + last_transaction_version.eq(excluded(last_transaction_version)), + last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE current_token_datas_v2.last_transaction_version <= excluded.last_transaction_version "), + )?; + } + Ok(()) +} + +fn insert_current_token_ownerships_v2( + conn: &mut PgConnection, + items_to_insert: &[CurrentTokenOwnershipV2], +) -> Result<(), diesel::result::Error> { + use schema::current_token_ownerships_v2::dsl::*; + + let chunks = get_chunks( + items_to_insert.len(), + CurrentTokenOwnershipV2::field_count(), + ); + + for (start_ind, end_ind) in chunks { + execute_with_better_error( + conn, + diesel::insert_into(schema::current_token_ownerships_v2::table) + .values(&items_to_insert[start_ind..end_ind]) + .on_conflict((token_data_id, property_version_v1, owner_address, storage_id)) + .do_update() + .set(( + amount.eq(excluded(amount)), + table_type_v1.eq(excluded(table_type_v1)), + token_properties_mutated_v1.eq(excluded(token_properties_mutated_v1)), + token_standard.eq(excluded(token_standard)), + is_fungible_v2.eq(excluded(is_fungible_v2)), + last_transaction_version.eq(excluded(last_transaction_version)), + last_transaction_timestamp.eq(excluded(last_transaction_timestamp)), + inserted_at.eq(excluded(inserted_at)), + )), + Some(" WHERE current_token_ownerships_v2.last_transaction_version <= excluded.last_transaction_version "), + )?; + } + Ok(()) +} + #[async_trait] impl TransactionProcessor for TokenTransactionProcessor { fn name(&self) -> &'static str { @@ -520,15 +770,13 @@ impl TransactionProcessor for TokenTransactionProcessor { let table_handle_to_owner = TableMetadataForToken::get_table_handle_to_owner_from_transactions(&transactions); + // Token V1 only, this section will be deprecated soon let mut all_tokens = vec![]; let mut all_token_ownerships = vec![]; let mut all_token_datas = vec![]; let mut all_collection_datas = vec![]; let mut all_token_activities = vec![]; - // This is likely temporary - let mut all_nft_points = vec![]; - // Hashmap key will be the PK of the table, we do not want to 
send duplicates writes to the db within a batch let mut all_current_token_ownerships: HashMap< CurrentTokenOwnershipPK, @@ -545,7 +793,10 @@ impl TransactionProcessor for TokenTransactionProcessor { let mut all_current_ans_lookups: HashMap = HashMap::new(); - for txn in transactions { + // This is likely temporary + let mut all_nft_points = vec![]; + + for txn in &transactions { let ( mut tokens, mut token_ownerships, @@ -555,7 +806,7 @@ impl TransactionProcessor for TokenTransactionProcessor { current_token_datas, current_collection_datas, current_token_claims, - ) = Token::from_transaction(&txn, &table_handle_to_owner, &mut conn); + ) = Token::from_transaction(txn, &table_handle_to_owner, &mut conn); all_tokens.append(&mut tokens); all_token_ownerships.append(&mut token_ownerships); all_token_datas.append(&mut token_datas); @@ -566,7 +817,7 @@ impl TransactionProcessor for TokenTransactionProcessor { all_current_collection_datas.extend(current_collection_datas); // Track token activities - let mut activities = TokenActivity::from_transaction(&txn); + let mut activities = TokenActivity::from_transaction(txn); all_token_activities.append(&mut activities); // claims @@ -574,12 +825,11 @@ impl TransactionProcessor for TokenTransactionProcessor { // ANS lookups let current_ans_lookups = - CurrentAnsLookup::from_transaction(&txn, self.ans_contract_address.clone()); + CurrentAnsLookup::from_transaction(txn, self.ans_contract_address.clone()); all_current_ans_lookups.extend(current_ans_lookups); // NFT points - let nft_points_txn = - NftPoints::from_transaction(&txn, self.nft_points_contract.clone()); + let nft_points_txn = NftPoints::from_transaction(txn, self.nft_points_contract.clone()); if let Some(nft_points) = nft_points_txn { all_nft_points.push(nft_points); } @@ -631,6 +881,16 @@ impl TransactionProcessor for TokenTransactionProcessor { all_current_ans_lookups .sort_by(|a, b| a.domain.cmp(&b.domain).then(a.subdomain.cmp(&b.subdomain))); + // Token V2 processing which includes token v1 + let ( + collections_v2, + token_datas_v2, + token_ownerships_v2, + current_collections_v2, + current_token_ownerships_v2, + current_token_datas_v2, + ) = parse_v2_token(&transactions, &table_handle_to_owner, &mut conn); + let tx_result = insert_to_db( &mut conn, self.name(), @@ -651,6 +911,15 @@ impl TransactionProcessor for TokenTransactionProcessor { all_current_token_claims, all_current_ans_lookups, all_nft_points, + // Token V2 stuff which will token v1 tables above + ( + collections_v2, + token_datas_v2, + token_ownerships_v2, + current_collections_v2, + current_token_ownerships_v2, + current_token_datas_v2, + ), ); match tx_result { Ok(_) => Ok(ProcessingResult::new( @@ -671,3 +940,368 @@ impl TransactionProcessor for TokenTransactionProcessor { &self.connection_pool } } + +fn parse_v2_token( + transactions: &[Transaction], + table_handle_to_owner: &TableHandleToOwner, + conn: &mut PgPoolConnection, +) -> ( + Vec, + Vec, + Vec, + Vec, + Vec, + Vec, +) { + // Token V2 and V1 combined + let mut collections_v2 = vec![]; + let mut token_datas_v2 = vec![]; + let mut token_ownerships_v2 = vec![]; + let mut current_collections_v2: HashMap = + HashMap::new(); + let mut current_token_datas_v2: HashMap = + HashMap::new(); + let mut current_token_ownerships_v2: HashMap< + CurrentTokenOwnershipV2PK, + CurrentTokenOwnershipV2, + > = HashMap::new(); + // Tracks prior ownership in case a token gets burned + let mut prior_nft_ownership: HashMap = HashMap::new(); + // Get Metadata for token v2 by object + // We want 
to persist this through the entire batch so that even if a token is burned, + // we can still get the object core metadata for it + let mut token_v2_metadata: TokenV2AggregatedDataMapping = HashMap::new(); + + // Code above is inefficient (multiple passthroughs) so I'm approaching TokenV2 with a cleaner code structure + for txn in transactions { + if let Transaction::UserTransaction(user_txn) = txn { + let txn_version = user_txn.info.version.0 as i64; + let txn_timestamp = parse_timestamp(user_txn.timestamp.0, txn_version); + // Get burn events for token v2 by object + let mut tokens_burned: TokenV2Burned = HashSet::new(); + + // Pass through events to get the burn events and token activities v2 + for (_, event) in user_txn.events.iter().enumerate() { + if let Some(burn_event) = BurnEvent::from_event(event, txn_version).unwrap() { + tokens_burned.insert(burn_event.get_token_address()); + } + } + + // Need to do a first pass to get all the objects + for (_, wsc) in user_txn.info.changes.iter().enumerate() { + if let WriteSetChange::WriteResource(wr) = wsc { + if let Some(object_core) = + ObjectCore::from_write_resource(wr, txn_version).unwrap() + { + token_v2_metadata.insert( + standardize_address(&wr.address.to_string()), + TokenV2AggregatedData { + aptos_collection: None, + fixed_supply: None, + object: object_core, + unlimited_supply: None, + property_map: None, + }, + ); + } + } + } + + // Need to do a second pass to get all the structs related to the object + for (_, wsc) in user_txn.info.changes.iter().enumerate() { + if let WriteSetChange::WriteResource(wr) = wsc { + let address = standardize_address(&wr.address.to_string()); + if let Some(aggregated_data) = token_v2_metadata.get_mut(&address) { + if let Some(fixed_supply) = + FixedSupply::from_write_resource(wr, txn_version).unwrap() + { + aggregated_data.fixed_supply = Some(fixed_supply); + } + if let Some(unlimited_supply) = + UnlimitedSupply::from_write_resource(wr, txn_version).unwrap() + { + aggregated_data.unlimited_supply = Some(unlimited_supply); + } + if let Some(aptos_collection) = + AptosCollection::from_write_resource(wr, txn_version).unwrap() + { + aggregated_data.aptos_collection = Some(aptos_collection); + } + if let Some(property_map) = + PropertyMap::from_write_resource(wr, txn_version).unwrap() + { + aggregated_data.property_map = Some(property_map); + } + } + } + } + + for (index, wsc) in user_txn.info.changes.iter().enumerate() { + let wsc_index = index as i64; + match wsc { + WriteSetChange::WriteTableItem(table_item) => { + if let Some((collection, current_collection)) = + CollectionV2::get_v1_from_write_table_item( + table_item, + txn_version, + wsc_index, + txn_timestamp, + table_handle_to_owner, + conn, + ) + .unwrap() + { + collections_v2.push(collection); + current_collections_v2.insert( + current_collection.collection_id.clone(), + current_collection, + ); + } + if let Some((token_data, current_token_data)) = + TokenDataV2::get_v1_from_write_table_item( + table_item, + txn_version, + wsc_index, + txn_timestamp, + ) + .unwrap() + { + token_datas_v2.push(token_data); + current_token_datas_v2.insert( + current_token_data.token_data_id.clone(), + current_token_data, + ); + } + if let Some((token_ownership, current_token_ownership)) = + TokenOwnershipV2::get_v1_from_write_table_item( + table_item, + txn_version, + wsc_index, + txn_timestamp, + table_handle_to_owner, + ) + .unwrap() + { + token_ownerships_v2.push(token_ownership); + if let Some(cto) = current_token_ownership { + prior_nft_ownership.insert( + 
cto.token_data_id.clone(), + NFTOwnershipV2 { + token_data_id: cto.token_data_id.clone(), + owner_address: cto.owner_address.clone(), + is_soulbound: cto.is_soulbound_v2, + }, + ); + current_token_ownerships_v2.insert( + ( + cto.token_data_id.clone(), + cto.property_version_v1.clone(), + cto.owner_address.clone(), + cto.storage_id.clone(), + ), + cto, + ); + } + } + }, + WriteSetChange::DeleteTableItem(table_item) => { + if let Some((token_ownership, current_token_ownership)) = + TokenOwnershipV2::get_v1_from_delete_table_item( + table_item, + txn_version, + wsc_index, + txn_timestamp, + table_handle_to_owner, + ) + .unwrap() + { + token_ownerships_v2.push(token_ownership); + if let Some(cto) = current_token_ownership { + prior_nft_ownership.insert( + cto.token_data_id.clone(), + NFTOwnershipV2 { + token_data_id: cto.token_data_id.clone(), + owner_address: cto.owner_address.clone(), + is_soulbound: cto.is_soulbound_v2, + }, + ); + current_token_ownerships_v2.insert( + ( + cto.token_data_id.clone(), + cto.property_version_v1.clone(), + cto.owner_address.clone(), + cto.storage_id.clone(), + ), + cto, + ); + } + } + }, + WriteSetChange::WriteResource(resource) => { + if let Some((collection, current_collection)) = + CollectionV2::get_v2_from_write_resource( + resource, + txn_version, + wsc_index, + txn_timestamp, + &token_v2_metadata, + ) + .unwrap() + { + collections_v2.push(collection); + current_collections_v2.insert( + current_collection.collection_id.clone(), + current_collection, + ); + } + if let Some((token_data, current_token_data)) = + TokenDataV2::get_v2_from_write_resource( + resource, + txn_version, + wsc_index, + txn_timestamp, + &token_v2_metadata, + ) + .unwrap() + { + // Add NFT ownership + let (nft_ownership, current_nft_ownership) = + TokenOwnershipV2::get_nft_v2_from_token_data( + &token_data, + &token_v2_metadata, + ) + .unwrap(); + token_datas_v2.push(token_data); + current_token_datas_v2.insert( + current_token_data.token_data_id.clone(), + current_token_data, + ); + token_ownerships_v2.push(nft_ownership); + prior_nft_ownership.insert( + current_nft_ownership.token_data_id.clone(), + NFTOwnershipV2 { + token_data_id: current_nft_ownership.token_data_id.clone(), + owner_address: current_nft_ownership.owner_address.clone(), + is_soulbound: current_nft_ownership.is_soulbound_v2, + }, + ); + current_token_ownerships_v2.insert( + ( + current_nft_ownership.token_data_id.clone(), + current_nft_ownership.property_version_v1.clone(), + current_nft_ownership.owner_address.clone(), + current_nft_ownership.storage_id.clone(), + ), + current_nft_ownership, + ); + + // Add burned NFT handling + if let Some((nft_ownership, current_nft_ownership)) = + TokenOwnershipV2::get_burned_nft_v2_from_write_resource( + resource, + txn_version, + wsc_index, + txn_timestamp, + &tokens_burned, + ) + .unwrap() + { + token_ownerships_v2.push(nft_ownership); + prior_nft_ownership.insert( + current_nft_ownership.token_data_id.clone(), + NFTOwnershipV2 { + token_data_id: current_nft_ownership.token_data_id.clone(), + owner_address: current_nft_ownership.owner_address.clone(), + is_soulbound: current_nft_ownership.is_soulbound_v2, + }, + ); + current_token_ownerships_v2.insert( + ( + current_nft_ownership.token_data_id.clone(), + current_nft_ownership.property_version_v1.clone(), + current_nft_ownership.owner_address.clone(), + current_nft_ownership.storage_id.clone(), + ), + current_nft_ownership, + ); + } + } + }, + WriteSetChange::DeleteResource(resource) => { + // Add burned NFT handling + if let 
Some((nft_ownership, current_nft_ownership)) = + TokenOwnershipV2::get_burned_nft_v2_from_delete_resource( + resource, + txn_version, + wsc_index, + txn_timestamp, + &prior_nft_ownership, + &tokens_burned, + conn, + ) + .unwrap() + { + token_ownerships_v2.push(nft_ownership); + prior_nft_ownership.insert( + current_nft_ownership.token_data_id.clone(), + NFTOwnershipV2 { + token_data_id: current_nft_ownership.token_data_id.clone(), + owner_address: current_nft_ownership.owner_address.clone(), + is_soulbound: current_nft_ownership.is_soulbound_v2, + }, + ); + current_token_ownerships_v2.insert( + ( + current_nft_ownership.token_data_id.clone(), + current_nft_ownership.property_version_v1.clone(), + current_nft_ownership.owner_address.clone(), + current_nft_ownership.storage_id.clone(), + ), + current_nft_ownership, + ); + } + }, + _ => {}, + } + } + } + } + + // Getting list of values and sorting by pk in order to avoid postgres deadlock since we're doing multi threaded db writes + let mut current_collections_v2 = current_collections_v2 + .into_values() + .collect::>(); + let mut current_token_datas_v2 = current_token_datas_v2 + .into_values() + .collect::>(); + let mut current_token_ownerships_v2 = current_token_ownerships_v2 + .into_values() + .collect::>(); + + // Sort by PK + current_collections_v2.sort_by(|a, b| a.collection_id.cmp(&b.collection_id)); + current_token_datas_v2.sort_by(|a, b| a.token_data_id.cmp(&b.token_data_id)); + current_token_ownerships_v2.sort_by(|a, b| { + ( + &a.token_data_id, + &a.property_version_v1, + &a.owner_address, + &a.storage_id, + ) + .cmp(&( + &b.token_data_id, + &b.property_version_v1, + &b.owner_address, + &b.storage_id, + )) + }); + + ( + collections_v2, + token_datas_v2, + token_ownerships_v2, + current_collections_v2, + current_token_datas_v2, + current_token_ownerships_v2, + ) +} diff --git a/crates/indexer/src/schema.rs b/crates/indexer/src/schema.rs index 611fb05bf4ff1..0bf1fa4e47e7b 100644 --- a/crates/indexer/src/schema.rs +++ b/crates/indexer/src/schema.rs @@ -96,6 +96,27 @@ diesel::table! { } } +diesel::table! { + collections_v2 (transaction_version, write_set_change_index) { + transaction_version -> Int8, + write_set_change_index -> Int8, + collection_id -> Varchar, + creator_address -> Varchar, + collection_name -> Varchar, + description -> Text, + uri -> Varchar, + current_supply -> Numeric, + max_supply -> Nullable, + total_minted_v2 -> Nullable, + mutable_description -> Nullable, + mutable_uri -> Nullable, + table_handle_v1 -> Nullable, + token_standard -> Varchar, + transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + diesel::table! { current_ans_lookup (domain, subdomain) { domain -> Varchar, @@ -139,6 +160,26 @@ diesel::table! { } } +diesel::table! { + current_collections_v2 (collection_id) { + collection_id -> Varchar, + creator_address -> Varchar, + collection_name -> Varchar, + description -> Text, + uri -> Varchar, + current_supply -> Numeric, + max_supply -> Nullable, + total_minted_v2 -> Nullable, + mutable_description -> Nullable, + mutable_uri -> Nullable, + table_handle_v1 -> Nullable, + token_standard -> Varchar, + last_transaction_version -> Int8, + last_transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + diesel::table! { current_delegated_staking_pool_balances (staking_pool_address) { staking_pool_address -> Varchar, @@ -161,6 +202,19 @@ diesel::table! { } } +diesel::table! 
{ + current_objects (object_address) { + object_address -> Varchar, + owner_address -> Varchar, + state_key_hash -> Varchar, + allow_ungated_transfer -> Bool, + last_guid_creation_num -> Numeric, + last_transaction_version -> Int8, + is_deleted -> Bool, + inserted_at -> Timestamp, + } +} + diesel::table! { current_staking_pool_voter (staking_pool_address) { staking_pool_address -> Varchar, @@ -211,6 +265,25 @@ diesel::table! { } } +diesel::table! { + current_token_datas_v2 (token_data_id) { + token_data_id -> Varchar, + collection_id -> Varchar, + token_name -> Varchar, + maximum -> Nullable, + supply -> Numeric, + largest_property_version_v1 -> Nullable, + token_uri -> Varchar, + description -> Text, + token_properties -> Jsonb, + token_standard -> Varchar, + is_fungible_v2 -> Nullable, + last_transaction_version -> Int8, + last_transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + diesel::table! { current_token_ownerships (token_data_id_hash, property_version, owner_address) { token_data_id_hash -> Varchar, @@ -229,6 +302,24 @@ diesel::table! { } } +diesel::table! { + current_token_ownerships_v2 (token_data_id, property_version_v1, owner_address, storage_id) { + token_data_id -> Varchar, + property_version_v1 -> Numeric, + owner_address -> Varchar, + storage_id -> Varchar, + amount -> Numeric, + table_type_v1 -> Nullable, + token_properties_mutated_v1 -> Nullable, + is_soulbound_v2 -> Nullable, + token_standard -> Varchar, + is_fungible_v2 -> Nullable, + last_transaction_version -> Int8, + last_transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + diesel::table! { current_token_pending_claims (token_data_id_hash, property_version, from_address, to_address) { token_data_id_hash -> Varchar, @@ -244,6 +335,8 @@ diesel::table! { last_transaction_version -> Int8, inserted_at -> Timestamp, last_transaction_timestamp -> Timestamp, + token_data_id -> Varchar, + collection_id -> Varchar, } } @@ -336,6 +429,7 @@ diesel::table! { data -> Nullable, is_deleted -> Bool, inserted_at -> Timestamp, + state_key_hash -> Varchar, } } @@ -351,6 +445,20 @@ diesel::table! { } } +diesel::table! { + objects (transaction_version, write_set_change_index) { + transaction_version -> Int8, + write_set_change_index -> Int8, + object_address -> Varchar, + owner_address -> Nullable, + state_key_hash -> Varchar, + guid_creation_num -> Nullable, + allow_ungated_transfer -> Nullable, + is_deleted -> Bool, + inserted_at -> Timestamp, + } +} + diesel::table! { processor_status (processor) { processor -> Varchar, @@ -474,6 +582,26 @@ diesel::table! { } } +diesel::table! { + token_datas_v2 (transaction_version, write_set_change_index) { + transaction_version -> Int8, + write_set_change_index -> Int8, + token_data_id -> Varchar, + collection_id -> Varchar, + token_name -> Varchar, + maximum -> Nullable, + supply -> Numeric, + largest_property_version_v1 -> Nullable, + token_uri -> Varchar, + token_properties -> Jsonb, + description -> Text, + token_standard -> Varchar, + is_fungible_v2 -> Nullable, + transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + diesel::table! { token_ownerships (token_data_id_hash, property_version, transaction_version, table_handle) { token_data_id_hash -> Varchar, @@ -492,6 +620,25 @@ diesel::table! { } } +diesel::table! 
{ + token_ownerships_v2 (transaction_version, write_set_change_index) { + transaction_version -> Int8, + write_set_change_index -> Int8, + token_data_id -> Varchar, + property_version_v1 -> Numeric, + owner_address -> Nullable, + storage_id -> Varchar, + amount -> Numeric, + table_type_v1 -> Nullable, + token_properties_mutated_v1 -> Nullable, + is_soulbound_v2 -> Nullable, + token_standard -> Varchar, + is_fungible_v2 -> Nullable, + transaction_timestamp -> Timestamp, + inserted_at -> Timestamp, + } +} + diesel::table! { tokens (token_data_id_hash, property_version, transaction_version) { token_data_id_hash -> Varchar, @@ -566,15 +713,20 @@ diesel::allow_tables_to_appear_in_same_query!( coin_infos, coin_supply, collection_datas, + collections_v2, current_ans_lookup, current_coin_balances, current_collection_datas, + current_collections_v2, current_delegated_staking_pool_balances, current_delegator_balances, + current_objects, current_staking_pool_voter, current_table_items, current_token_datas, + current_token_datas_v2, current_token_ownerships, + current_token_ownerships_v2, current_token_pending_claims, delegated_staking_activities, delegated_staking_pool_balances, @@ -585,6 +737,7 @@ diesel::allow_tables_to_appear_in_same_query!( move_modules, move_resources, nft_points, + objects, processor_status, processor_statuses, proposal_votes, @@ -593,7 +746,9 @@ diesel::allow_tables_to_appear_in_same_query!( table_metadatas, token_activities, token_datas, + token_datas_v2, token_ownerships, + token_ownerships_v2, tokens, transactions, user_transactions, diff --git a/crates/indexer/src/util.rs b/crates/indexer/src/util.rs index 3aabd3b84371d..7aeee5ef91017 100644 --- a/crates/indexer/src/util.rs +++ b/crates/indexer/src/util.rs @@ -1,7 +1,7 @@ // Copyright © Aptos Foundation // SPDX-License-Identifier: Apache-2.0 -use crate::models::property_map::PropertyMap; +use crate::models::property_map::{PropertyMap, TokenObjectPropertyMap}; use aptos_api_types::Address; use bigdecimal::{BigDecimal, Signed, ToPrimitive, Zero}; use serde::{Deserialize, Deserializer}; @@ -101,6 +101,18 @@ where Ok(convert_bcs_propertymap(s.clone()).unwrap_or(s)) } +/// convert the bcs encoded inner value of property_map to its original value in string format +pub fn deserialize_token_object_property_map_from_bcs_hexstring<'de, D>( + deserializer: D, +) -> core::result::Result +where + D: Deserializer<'de>, +{ + let s = serde_json::Value::deserialize(deserializer)?; + // iterate the json string to convert key-value pair + Ok(convert_bcs_token_object_propertymap(s.clone()).unwrap_or(s)) +} + pub fn deserialize_string_from_hexstring<'de, D>( deserializer: D, ) -> core::result::Result @@ -117,16 +129,36 @@ pub fn convert_bcs_hex(typ: String, value: String) -> Option { match typ.as_str() { "0x1::string::String" => bcs::from_bytes::(decoded.as_slice()), - "u8" => bcs::from_bytes::(decoded.as_slice()).map(|e| format!("{}", e)), - "u64" => bcs::from_bytes::(decoded.as_slice()).map(|e| format!("{}", e)), - "u128" => bcs::from_bytes::(decoded.as_slice()).map(|e| format!("{}", e)), - "bool" => bcs::from_bytes::(decoded.as_slice()).map(|e| format!("{}", e)), - "address" => bcs::from_bytes::
<Address>(decoded.as_slice()).map(|e| format!("{}", e)), + "u8" => bcs::from_bytes::<u8>(decoded.as_slice()).map(|e| e.to_string()), + "u64" => bcs::from_bytes::<u64>(decoded.as_slice()).map(|e| e.to_string()), + "u128" => bcs::from_bytes::<u128>(decoded.as_slice()).map(|e| e.to_string()), + "bool" => bcs::from_bytes::<bool>(decoded.as_slice()).map(|e| e.to_string()), + "address" => bcs::from_bytes::<Address>
(decoded.as_slice()).map(|e| e.to_string()), + _ => Ok(value), } .ok() } +/// Convert the bcs serialized vector to its original string format for token v2 property map. +pub fn convert_bcs_hex_new(typ: u8, value: String) -> Option<String> { + let decoded = hex::decode(value.strip_prefix("0x").unwrap_or(&*value)).ok()?; + + match typ { + 0 /* bool */ => bcs::from_bytes::<bool>(decoded.as_slice()).map(|e| e.to_string()), + 1 /* u8 */ => bcs::from_bytes::<u8>(decoded.as_slice()).map(|e| e.to_string()), + 2 /* u16 */ => bcs::from_bytes::<u16>(decoded.as_slice()).map(|e| e.to_string()), + 3 /* u32 */ => bcs::from_bytes::<u32>(decoded.as_slice()).map(|e| e.to_string()), + 4 /* u64 */ => bcs::from_bytes::<u64>(decoded.as_slice()).map(|e| e.to_string()), + 5 /* u128 */ => bcs::from_bytes::<u128>(decoded.as_slice()).map(|e| e.to_string()), + 6 /* u256 */ => bcs::from_bytes::(decoded.as_slice()).map(|e| e.to_string()), + 7 /* address */ => bcs::from_bytes::<Address>
(decoded.as_slice()).map(|e| e.to_string()), + 8 /* byte_vector */ => bcs::from_bytes::>(decoded.as_slice()).map(|e| format!("0x{}", hex::encode(e))), + 9 /* string */ => bcs::from_bytes::(decoded.as_slice()), + _ => Ok(value), + } + .ok() +} + /// Convert the json serialized PropertyMap's inner BCS fields to their original value in string format pub fn convert_bcs_propertymap(s: Value) -> Option { match PropertyMap::from_bcs_encode_str(s) { @@ -138,6 +170,16 @@ pub fn convert_bcs_propertymap(s: Value) -> Option { } } +pub fn convert_bcs_token_object_propertymap(s: Value) -> Option { + match TokenObjectPropertyMap::from_bcs_encode_str(s) { + Some(e) => match serde_json::to_value(&e) { + Ok(val) => Some(val), + Err(_) => None, + }, + None => None, + } +} + /// Convert the vector that is directly generated from b"xxx" pub fn convert_hex(val: String) -> Option { let decoded = hex::decode(val.strip_prefix("0x").unwrap_or(&*val)).ok()?; @@ -164,6 +206,12 @@ mod tests { pub default_properties: serde_json::Value, } + #[derive(Serialize, Deserialize, Debug)] + struct TokenObjectDataMock { + #[serde(deserialize_with = "deserialize_token_object_property_map_from_bcs_hexstring")] + pub default_properties: serde_json::Value, + } + #[test] fn test_parse_timestamp() { let ts = parse_timestamp(1649560602763949, 1); @@ -243,4 +291,68 @@ mod tests { let d: TokenDataMock = serde_json::from_str(val.as_str()).unwrap(); assert_eq!(d.default_properties, Value::Object(serde_json::Map::new())); } + + #[test] + fn test_deserialize_token_object_property_map() { + let test_property_json = r#" + { + "data": [{ + "key": "Rank", + "value": { + "type": 9, + "value": "0x0642726f6e7a65" + } + }, + { + "key": "address_property", + "value": { + "type": 7, + "value": "0x2b4d540735a4e128fda896f988415910a45cab41c9ddd802b32dd16e8f9ca3cd" + } + }, + { + "key": "bytes_property", + "value": { + "type": 8, + "value": "0x0401020304" + } + }, + { + "key": "u64_property", + "value": { + "type": 4, + "value": "0x0000000000000001" + } + } + ] + } + "#; + let test_property_json: serde_json::Value = + serde_json::from_str(test_property_json).unwrap(); + let test_struct = TokenObjectDataMock { + default_properties: test_property_json, + }; + let val = serde_json::to_string(&test_struct).unwrap(); + let d: TokenObjectDataMock = serde_json::from_str(val.as_str()).unwrap(); + assert_eq!(d.default_properties["Rank"], "Bronze"); + assert_eq!( + d.default_properties["address_property"], + "0x2b4d540735a4e128fda896f988415910a45cab41c9ddd802b32dd16e8f9ca3cd" + ); + assert_eq!(d.default_properties["bytes_property"], "0x01020304"); + assert_eq!(d.default_properties["u64_property"], "72057594037927936"); + } + + #[test] + fn test_empty_token_object_property_map() { + let test_property_json = r#"{"data": []}"#; + let test_property_json: serde_json::Value = + serde_json::from_str(test_property_json).unwrap(); + let test_struct = TokenObjectDataMock { + default_properties: test_property_json, + }; + let val = serde_json::to_string(&test_struct).unwrap(); + let d: TokenObjectDataMock = serde_json::from_str(val.as_str()).unwrap(); + assert_eq!(d.default_properties, Value::Object(serde_json::Map::new())); + } }
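A quick sanity check on the property-map fixture above: each value is a BCS payload whose Rust type is selected by the numeric type tag, which mirrors how convert_bcs_hex_new in this change decodes each tagged value. The following is a minimal standalone sketch (not part of this change), assuming only the bcs and hex crates the indexer already depends on:

// Type tag 9 is a BCS string: 0x06 is the length prefix and the remaining
// bytes are the UTF-8 for "Bronze", matching the "Rank" assertion in the test.
fn main() {
    let value = "0x0642726f6e7a65";
    let decoded = hex::decode(value.strip_prefix("0x").unwrap()).unwrap();
    let rank: String = bcs::from_bytes(&decoded).unwrap();
    assert_eq!(rank, "Bronze");

    // Type tag 4 is u64, serialized little-endian, so 0x0000000000000001
    // decodes to 1 << 56 = 72057594037927936, matching the u64_property assertion.
    let value = "0x0000000000000001";
    let decoded = hex::decode(value.strip_prefix("0x").unwrap()).unwrap();
    let n: u64 = bcs::from_bytes(&decoded).unwrap();
    assert_eq!(n, 72_057_594_037_927_936);
}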