Skip to content

Commit

Permalink
More cleanups on the deleted/unwrapped object id handling (MystenLabs…
Browse files Browse the repository at this point in the history
…#797)

* More cleanups on the deleted/unwrapped object id handling

* Pass in new object ids as HashSet directly
  • Loading branch information
lxfind authored Mar 12, 2022
1 parent 4c9797f commit 11af754
Show file tree
Hide file tree
Showing 7 changed files with 130 additions and 144 deletions.
39 changes: 3 additions & 36 deletions sui_core/src/authority.rs
Original file line number Diff line number Diff line change
Expand Up @@ -447,7 +447,7 @@ impl AuthorityState {
let mut tx_ctx = TxContext::new(&transaction.sender_address(), transaction_digest);

let gas_object_id = transaction.gas_payment_object_ref().0;
let (mut temporary_store, status) =
let (temporary_store, status) =
self.execute_transaction(transaction, inputs, &mut tx_ctx)?;
debug!(
gas_used = status.gas_used(),
Expand All @@ -457,23 +457,17 @@ impl AuthorityState {
// Remove from dependencies the generic hash
transaction_dependencies.remove(&TransactionDigest::genesis());

// Objects that were wrapped in the past and just got unwrapped
// require special patch up. It also affects how signed effects are generated.
// See detailed comments in the implementation of [`AuthorityTemporaryStore::patch_unwrapped_objects`].
let unwrapped_object_ids = self.get_unwrapped_object_ids(&temporary_store, &tx_ctx)?;
temporary_store.patch_unwrapped_objects(&unwrapped_object_ids);
let to_signed_effects = temporary_store.to_signed_effects(
let signed_effects = temporary_store.to_signed_effects(
&self.name,
&*self.secret,
&transaction_digest,
transaction_dependencies.into_iter().collect(),
status,
&gas_object_id,
unwrapped_object_ids,
);
// Update the database in an atomic manner
let (seq, resp) = self
.update_state(temporary_store, certificate, to_signed_effects)
.update_state(temporary_store, certificate, signed_effects)
.instrument(tracing::debug_span!("db_update_state"))
.await?; // Returns the OrderInfoResponse

Expand Down Expand Up @@ -943,33 +937,6 @@ impl AuthorityState {
) -> Result<Option<(ObjectRef, TransactionDigest)>, SuiError> {
self._database.get_latest_parent_entry(object_id)
}

/// Find all objects that were wrapped in the past and got unwrapped in this
/// transaction. An unwrapped object can either show up after this transaction
/// (i.e. in written), or gets deleted in this transaction (in deleted).
fn get_unwrapped_object_ids(
    &self,
    temporary_store: &AuthorityTemporaryStore,
    ctx: &TxContext,
) -> SuiResult<HashSet<ObjectID>> {
    // IDs freshly generated by this transaction's TxContext. Anything the
    // transaction touched that is neither freshly generated nor present in
    // the input object set must have been unwrapped during execution.
    let ids_generated = ctx.recreate_all_ids();

    let written_ids = temporary_store.written().iter().map(|(objid, _)| *objid);
    let deleted_ids = temporary_store.deleted().iter().map(|(objid, _)| *objid);

    let mut unwrapped = HashSet::new();
    for objid in written_ids.chain(deleted_ids) {
        let freshly_created = ids_generated.contains(&objid);
        let was_input = temporary_store.objects().contains_key(&objid);
        if !freshly_created && !was_input {
            unwrapped.insert(objid);
        }
    }

    Ok(unwrapped)
}
}

impl ModuleResolver for AuthorityState {
Expand Down
44 changes: 19 additions & 25 deletions sui_core/src/authority/temporary_store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,9 @@ pub struct AuthorityTemporaryStore {
deleted: BTreeMap<ObjectID, (SequenceNumber, DeleteKind)>,
/// Ordered sequence of events emitted by execution
events: Vec<Event>,
// New object IDs created during the transaction, needed for
// telling apart unwrapped objects.
created_object_ids: HashSet<ObjectID>,
}

impl AuthorityTemporaryStore {
Expand All @@ -48,6 +51,7 @@ impl AuthorityTemporaryStore {
written: BTreeMap::new(),
deleted: BTreeMap::new(),
events: Vec::new(),
created_object_ids: HashSet::new(),
}
}

Expand Down Expand Up @@ -93,26 +97,6 @@ impl AuthorityTemporaryStore {
}
}

/// Special handling for objects that were wrapped in the past and are now
/// unwrapped. When an object was wrapped at version `v`, we added a record into
/// `parent_sync` with version `v+1` along with OBJECT_DIGEST_WRAPPED. Now that
/// the object is unwrapped, it would also carry version `v+1`, violating the
/// invariant that every (object_id, version) pair is unique. Hence for any
/// object that was just unwrapped, we bump its version once more (to `v+2`)
/// before writing to the store.
pub fn patch_unwrapped_objects(&mut self, unwrapped_object_ids: &HashSet<ObjectID>) {
    for id in unwrapped_object_ids {
        // An unwrapped object shows up either in `written` or in `deleted`.
        match self.written.get_mut(id) {
            Some(object) => {
                object.data.try_as_move_mut().unwrap().increment_version();
            }
            None => {
                // unwrap safe: unwrapped_object_ids was constructed from
                // written and deleted, so absence from written implies
                // presence in deleted.
                let entry = self.deleted.get_mut(id).unwrap();
                entry.0 = entry.0.increment();
            }
        }
    }
}

pub fn to_signed_effects(
&self,
authority_name: &AuthorityName,
Expand All @@ -121,7 +105,6 @@ impl AuthorityTemporaryStore {
transaction_dependencies: Vec<TransactionDigest>,
status: ExecutionStatus,
gas_object_id: &ObjectID,
unwrapped_object_ids: HashSet<ObjectID>,
) -> SignedTransactionEffects {
let gas_object = &self.written[gas_object_id];
let effects = TransactionEffects {
Expand All @@ -130,9 +113,7 @@ impl AuthorityTemporaryStore {
created: self
.written
.iter()
.filter(|(id, _)| {
!self.objects.contains_key(*id) && !unwrapped_object_ids.contains(*id)
})
.filter(|(id, _)| self.created_object_ids.contains(*id))
.map(|(_, object)| (object.to_object_reference(), object.owner))
.collect(),
mutated: self
Expand All @@ -145,7 +126,7 @@ impl AuthorityTemporaryStore {
.written
.iter()
.filter(|(id, _)| {
!self.objects.contains_key(*id) && unwrapped_object_ids.contains(*id)
!self.objects.contains_key(*id) && !self.created_object_ids.contains(*id)
})
.map(|(_, object)| (object.to_object_reference(), object.owner))
.collect(),
Expand Down Expand Up @@ -208,6 +189,14 @@ impl AuthorityTemporaryStore {
},
"Mutable input neither written nor deleted."
);

debug_assert!(
{
let input_ids: HashSet<ObjectID> = self.objects.clone().into_keys().collect();
self.created_object_ids.is_disjoint(&input_ids)
},
"Newly created object IDs showed up in the input",
);
}
}

Expand All @@ -217,6 +206,7 @@ impl Storage for AuthorityTemporaryStore {
self.written.clear();
self.deleted.clear();
self.events.clear();
self.created_object_ids.clear();
}

fn read_object(&self, id: &ObjectID) -> Option<Object> {
Expand All @@ -234,6 +224,10 @@ impl Storage for AuthorityTemporaryStore {
}
}

fn set_create_object_ids(&mut self, ids: HashSet<ObjectID>) {
self.created_object_ids = ids;
}

/*
Invariant: A key assumption of the write-delete logic
is that an entry is not both added and deleted by the
Expand Down
160 changes: 86 additions & 74 deletions sui_programmability/adapter/src/adapter.rs
Original file line number Diff line number Diff line change
Expand Up @@ -349,6 +349,7 @@ pub fn store_package_and_init_modules<
// wrap the modules in an object, write it to the store
// The call to unwrap() will go away once we remove address owner from Immutable objects.
let package_object = Object::new_package(modules, ctx.digest());
state_view.set_create_object_ids(HashSet::from([package_object.id()]));
state_view.write_object(package_object);

init_modules(state_view, vm, modules_to_init, ctx, gas_budget)
Expand Down Expand Up @@ -520,69 +521,89 @@ fn process_successful_execution<
}
// process events to identify transfers, freezes
let mut gas_used = 0;
let mut gas_refund = 0;
let tx_digest = ctx.digest();
let mut deleted_ids = HashMap::new();
let mut deleted_child_ids = HashSet::new();
// newly_generated_ids contains all object IDs generated in this transaction.
let newly_generated_ids = ctx.recreate_all_ids();
state_view.set_create_object_ids(newly_generated_ids.clone());
for e in events {
let (recipient, event_type, type_, event_bytes) = e;
let result = match EventType::try_from(event_type as u8)
.expect("Safe because event_type is derived from an EventType enum")
{
EventType::TransferToAddress => handle_transfer(
Owner::AddressOwner(SuiAddress::try_from(recipient.as_slice()).unwrap()),
type_,
event_bytes,
tx_digest,
&mut by_value_objects,
&mut gas_used,
state_view,
&mut object_owner_map,
),
EventType::FreezeObject => handle_transfer(
Owner::SharedImmutable,
type_,
event_bytes,
tx_digest,
&mut by_value_objects,
&mut gas_used,
state_view,
&mut object_owner_map,
),
EventType::TransferToObject => handle_transfer(
Owner::ObjectOwner(ObjectID::try_from(recipient.borrow()).unwrap().into()),
type_,
event_bytes,
tx_digest,
&mut by_value_objects,
&mut gas_used,
state_view,
&mut object_owner_map,
),
let event_type = EventType::try_from(event_type as u8)
.expect("Safe because event_type is derived from an EventType enum");
let result = match event_type {
EventType::TransferToAddress
| EventType::FreezeObject
| EventType::TransferToObject => {
let new_owner = match event_type {
EventType::TransferToAddress => {
Owner::AddressOwner(SuiAddress::try_from(recipient.as_slice()).unwrap())
}
EventType::FreezeObject => Owner::SharedImmutable,
EventType::TransferToObject => {
Owner::ObjectOwner(ObjectID::try_from(recipient.borrow()).unwrap().into())
}
_ => unreachable!(),
};
handle_transfer(
new_owner,
type_,
event_bytes,
tx_digest,
&mut by_value_objects,
&mut gas_used,
state_view,
&mut object_owner_map,
&newly_generated_ids,
)
}
EventType::ShareObject => Err(SuiError::UnsupportedSharedObjectError),
EventType::DeleteObjectID => {
// unwrap safe because this event can only be emitted from processing
// native call delete_id, which guarantees the type of the id.
let id: VersionedID = bcs::from_bytes(&event_bytes).unwrap();
let obj_id = id.object_id();
// We don't care about IDs that are generated in this same transaction
// but only to be deleted.
if !newly_generated_ids.contains(id.object_id()) {
deleted_ids.insert(*id.object_id(), id.version());
if !newly_generated_ids.contains(obj_id) {
if let Some(object) = by_value_objects.remove(id.object_id()) {
// This object was in the input, and is being deleted. A normal deletion.
debug_assert_eq!(object.version(), id.version());
if matches!(object.owner, Owner::ObjectOwner { .. }) {
// If an object is owned by another object, we are not allowed to directly delete the child
// object because this could lead to a dangling reference of the ownership. Such
// dangling reference can never be dropped. To delete this object, one must either first transfer
// the child object to an account address, or call through Transfer::delete_child_object(),
// which would consume both the child object and the ChildRef ownership reference,
// and emit the DeleteChildObject event. These child objects can be safely deleted.
return (gas_used, 0, Err(SuiError::DeleteObjectOwnedObject));
}
state_view.delete_object(obj_id, id.version(), DeleteKind::Normal);
gas_refund += gas::calculate_object_deletion_refund(&object);
} else {
// This object wasn't in the input, and is being deleted. It must
// be unwrapped in this transaction and then get deleted.
// When an object was wrapped at version `v`, we added an record into `parent_sync`
// with version `v+1` along with OBJECT_DIGEST_WRAPPED. Now when the object is unwrapped,
// it will also have version `v+1`, leading to a violation of the invariant that any
// object_id and version pair must be unique. Hence for any object that's just unwrapped,
// we force incrementing its version number again to make it `v+2` before writing to the store.
state_view.delete_object(
obj_id,
id.version().increment(),
DeleteKind::UnwrapThenDelete,
);
}
}
Ok(())
}
EventType::ShareObject => Err(SuiError::UnsupportedSharedObjectError),
EventType::DeleteChildObject => {
match type_ {
TypeTag::Struct(s) => {
let obj = MoveObject::new(s, event_bytes);
deleted_ids.insert(obj.id(), obj.version());
deleted_child_ids.insert(obj.id());
},
_ => unreachable!(
"Native function delete_child_object_internal<T> ensures that T is always bound to structs"
),
}
let id_bytes: AccountAddress = bcs::from_bytes(&event_bytes).unwrap();
let obj_id: ObjectID = id_bytes.into();
// unwrap safe since to delete a child object, this child object
// must be passed by value in the input.
let object = by_value_objects.remove(&obj_id).unwrap();
state_view.delete_object(&obj_id, object.version(), DeleteKind::Normal);
gas_refund += gas::calculate_object_deletion_refund(&object);
Ok(())
}
EventType::User => {
Expand All @@ -603,32 +624,8 @@ fn process_successful_execution<
// any object left in `by_value_objects` is an input passed by value that was not transferred or frozen.
// this means that either the object was (1) deleted from the Sui system altogether, or
// (2) wrapped inside another object that is in the Sui object pool
let mut gas_refund: u64 = 0;
for (id, object) in by_value_objects.iter() {
// If an object is owned by another object, we are not allowed to directly delete the child
// object because this could lead to a dangling reference of the ownership. Such
// dangling reference can never be dropped. To delete this object, one must either first transfer
// the child object to an account address, or call through Transfer::delete_child_object(),
// which would consume both the child object and the ChildRef ownership reference,
// and emit the DeleteChildObject event. These child objects can be safely deleted.
if matches!(object.owner, Owner::ObjectOwner { .. }) && !deleted_child_ids.contains(id) {
return (gas_used, 0, Err(SuiError::DeleteObjectOwnedObject));
}
if deleted_ids.contains_key(id) {
state_view.delete_object(id, object.version(), DeleteKind::Normal);
} else {
state_view.delete_object(id, object.version(), DeleteKind::Wrap);
}

gas_refund += gas::calculate_object_deletion_refund(object);
}

// Any id that's not covered in the above loop, i.e. not in input but also deleted,
// must be unwrapped and then deleted.
for (id, version) in deleted_ids {
if !by_value_objects.contains_key(&id) {
state_view.delete_object(&id, version, DeleteKind::UnwrapThenDelete);
}
state_view.delete_object(id, object.version(), DeleteKind::Wrap);
}

(gas_used, gas_refund, Ok(()))
Expand All @@ -647,6 +644,7 @@ fn handle_transfer<
gas_used: &mut u64,
state_view: &mut S,
object_owner_map: &mut HashMap<SuiAddress, SuiAddress>,
newly_generated_ids: &HashSet<ObjectID>,
) -> SuiResult {
match type_ {
TypeTag::Struct(s_type) => {
Expand All @@ -662,12 +660,26 @@ fn handle_transfer<
// freshly created, this means that its version will now be 1.
// thus, all objects in the global object pool have version > 0
move_obj.increment_version();
let obj_id = move_obj.id();
// A to-be-transferred object can come from 3 sources:
// 1. Passed in by-value (in `by_value_objects`, i.e. old_object is not none)
// 2. Created in this transaction (in `newly_generated_ids`)
// 3. Unwrapped in this transaction
// The following condition checks if this object was unwrapped in this transaction.
if old_object.is_none() && !newly_generated_ids.contains(&obj_id) {
// When an object was wrapped at version `v`, we added an record into `parent_sync`
// with version `v+1` along with OBJECT_DIGEST_WRAPPED. Now when the object is unwrapped,
// it will also have version `v+1`, leading to a violation of the invariant that any
// object_id and version pair must be unique. Hence for any object that's just unwrapped,
// we force incrementing its version number again to make it `v+2` before writing to the store.
move_obj.increment_version();
}
let obj = Object::new_move(move_obj, recipient, tx_digest);
if old_object.is_none() {
// Charge extra gas based on object size if we are creating a new object.
*gas_used += gas::calculate_object_creation_cost(&obj);
}
let obj_address: SuiAddress = obj.id().into();
let obj_address: SuiAddress = obj_id.into();
object_owner_map.remove(&obj_address);
if let Owner::ObjectOwner(new_owner) = recipient {
// Below we check whether the transfer introduced any circular ownership.
Expand Down
Loading

0 comments on commit 11af754

Please sign in to comment.