From d6238aef53b1f34540d3d1498daee0f137bc911d Mon Sep 17 00:00:00 2001 From: Toby Hutton Date: Mon, 3 Apr 2023 07:30:34 +1000 Subject: [PATCH] IR explicit pointer refactor. (#4336) --- .../src/asm_generation/evm/evm_asm_builder.rs | 107 +- sway-core/src/asm_generation/from_ir.rs | 46 +- .../fuel/abstract_instruction_set.rs | 21 +- .../asm_generation/fuel/fuel_asm_builder.rs | 1873 ++++++----------- .../src/asm_generation/fuel/functions.rs | 176 +- .../asm_generation/fuel/register_allocator.rs | 15 +- .../miden_vm/miden_vm_asm_builder.rs | 39 +- .../src/asm_generation/programs/abstract.rs | 9 +- .../dead_code_analysis.rs | 5 +- sway-core/src/ir_generation.rs | 8 +- sway-core/src/ir_generation/compile.rs | 135 +- sway-core/src/ir_generation/const_eval.rs | 20 +- sway-core/src/ir_generation/convert.rs | 8 +- sway-core/src/ir_generation/function.rs | 1608 +++++++------- sway-core/src/ir_generation/types.rs | 4 +- .../src/language/ty/expression/expression.rs | 5 +- .../ty/expression/expression_variant.rs | 42 +- sway-core/src/lib.rs | 29 +- .../src/monomorphize/gather/expression.rs | 5 +- .../src/monomorphize/instruct/expression.rs | 5 +- .../ast_node/declaration/impl_trait.rs | 5 +- .../ast_node/expression/typed_expression.rs | 2 + .../semantic_analysis/cei_pattern_analysis.rs | 14 +- .../semantic_analysis/storage_only_types.rs | 5 +- sway-core/src/type_system/engine.rs | 11 + sway-core/src/type_system/info.rs | 10 + sway-ir/src/asm.rs | 5 + sway-ir/src/block.rs | 46 +- sway-ir/src/error.rs | 225 +- sway-ir/src/function.rs | 58 +- sway-ir/src/instruction.rs | 467 ++-- sway-ir/src/irtype.rs | 38 +- sway-ir/src/local_var.rs | 27 +- sway-ir/src/optimize.rs | 20 +- sway-ir/src/optimize/arg_demotion.rs | 291 +++ sway-ir/src/optimize/const_demotion.rs | 84 + sway-ir/src/optimize/constants.rs | 109 +- sway-ir/src/optimize/inline.rs | 118 +- sway-ir/src/optimize/mem2reg.rs | 18 +- sway-ir/src/optimize/memcpyopt.rs | 268 +++ sway-ir/src/optimize/misc_demotion.rs | 303 +++ 
sway-ir/src/optimize/ret_demotion.rs | 177 ++ sway-ir/src/optimize/target_fuel.rs | 14 + sway-ir/src/parser.rs | 234 +- sway-ir/src/pass_manager.rs | 19 +- sway-ir/src/printer.rs | 182 +- sway-ir/src/value.rs | 28 +- sway-ir/src/verify.rs | 434 ++-- sway-ir/tests/constants/contract_calls.ir | 72 - sway-ir/tests/constants/insert_value.ir | 54 - sway-ir/tests/demote_arg/demote_arg00.ir | 27 + sway-ir/tests/demote_arg/demote_arg01.ir | 31 + sway-ir/tests/demote_arg/demote_arg02.ir | 67 + sway-ir/tests/demote_arg/demote_arg03.ir | 51 + sway-ir/tests/demote_arg/demote_arg04.ir | 59 + sway-ir/tests/demote_const/demote_const00.ir | 17 + sway-ir/tests/demote_const/demote_const01.ir | 17 + .../tests/demote_misc/demote_asm_block_arg.ir | 25 + .../tests/demote_misc/demote_asm_block_ret.ir | 17 + sway-ir/tests/demote_misc/demote_log.ir | 19 + .../tests/demote_misc/demote_ptr_to_int.ir | 25 + sway-ir/tests/demote_ret/demote_ret00.ir | 32 + sway-ir/tests/demote_ret/demote_ret01.ir | 80 + sway-ir/tests/inline/bigger.ir | 12 +- sway-ir/tests/inline/get_storage_key.ir | 26 +- sway-ir/tests/inline/int_to_ptr.ir | 26 +- sway-ir/tests/mem2reg/is_prime.ir | 34 +- sway-ir/tests/mem2reg/while_loops.ir | 74 +- sway-ir/tests/memcpyopt/ret_value.ir | 37 + sway-ir/tests/serialize/intrinsic_addr_of.ir | 8 +- sway-ir/tests/serialize/mem_copy.ir | 6 +- sway-ir/tests/serialize/storage_load.ir | 16 +- sway-ir/tests/serialize/storage_store.ir | 10 +- sway-ir/tests/simplify_cfg/dead_blocks.ir | 2 +- sway-ir/tests/tests.rs | 78 +- sway-lib-core/src/ops.sw | 4 +- sway-lsp/src/traverse/typed_tree.rs | 5 +- .../language/addrof_intrinsic/test.toml | 7 +- .../language/b256_bitwise_ops/src/main.sw | 35 +- .../configurable_consts/json_abi_oracle.json | 16 +- .../array_of_structs_caller/src/main.sw | 2 +- .../call_basic_storage/src/main.sw | 2 +- .../src/main.sw | 2 +- .../call_increment_contract/src/main.sw | 2 +- .../caller_context_test/src/main.sw | 2 +- .../get_storage_key_caller/src/main.sw | 2 +- 
.../nested_struct_args_caller/src/main.sw | 2 +- .../storage_access_caller/src/main.sw | 2 +- .../token_ops_test/src/main.sw | 2 +- .../should_pass/stdlib/vec/src/main.sw | 112 +- .../test_contracts/basic_storage/src/main.sw | 30 +- test/src/ir_generation/mod.rs | 42 +- test/src/ir_generation/tests/array_simple.sw | 7 +- test/src/ir_generation/tests/b256_immeds.sw | 2 +- test/src/ir_generation/tests/configs_2.sw | 20 +- test/src/ir_generation/tests/configs_3.sw | 18 +- test/src/ir_generation/tests/configs_4.sw | 12 +- test/src/ir_generation/tests/enum.sw | 46 +- test/src/ir_generation/tests/enum_enum.sw | 29 +- .../tests/enum_in_storage_read.sw | 16 +- .../tests/enum_in_storage_write.sw | 26 +- test/src/ir_generation/tests/enum_struct.sw | 37 +- .../ir_generation/tests/enum_struct_string.sw | 8 +- test/src/ir_generation/tests/fn_call.sw | 3 + .../tests/fn_call_ret_by_ref_explicit.sw | 48 +- .../tests/fn_call_ret_by_ref_implicit.sw | 54 +- .../ir_generation/tests/get_storage_key.sw | 12 +- test/src/ir_generation/tests/gtf.sw | 13 +- test/src/ir_generation/tests/if_let_simple.sw | 23 +- .../tests/impl_self_reassignment.sw | 14 +- .../ir_generation/tests/local_const_init.sw | 2 +- test/src/ir_generation/tests/main_retd.sw | 14 +- .../src/ir_generation/tests/mutable_struct.sw | 8 +- .../tests/return_stmt_structs.sw | 14 +- .../ir_generation/tests/shadowed_locals.sw | 15 +- .../tests/shadowed_struct_init.sw | 8 +- .../ir_generation/tests/simple_contract.sw | 31 +- .../tests/simple_contract_call.sw | 166 +- test/src/ir_generation/tests/smo.sw | 41 +- test/src/ir_generation/tests/strings.sw | 10 +- .../ir_generation/tests/strings_in_storage.sw | 21 +- test/src/ir_generation/tests/struct.sw | 25 +- test/src/ir_generation/tests/struct_enum.sw | 33 +- test/src/ir_generation/tests/struct_struct.sw | 51 +- test/src/ir_generation/tests/trait.sw | 14 +- .../ir_generation/tests/unit_type_variants.sw | 13 +- 126 files changed, 5303 insertions(+), 4114 deletions(-) create mode 
100644 sway-ir/src/optimize/arg_demotion.rs create mode 100644 sway-ir/src/optimize/const_demotion.rs create mode 100644 sway-ir/src/optimize/memcpyopt.rs create mode 100644 sway-ir/src/optimize/misc_demotion.rs create mode 100644 sway-ir/src/optimize/ret_demotion.rs create mode 100644 sway-ir/src/optimize/target_fuel.rs delete mode 100644 sway-ir/tests/constants/contract_calls.ir delete mode 100644 sway-ir/tests/constants/insert_value.ir create mode 100644 sway-ir/tests/demote_arg/demote_arg00.ir create mode 100644 sway-ir/tests/demote_arg/demote_arg01.ir create mode 100644 sway-ir/tests/demote_arg/demote_arg02.ir create mode 100644 sway-ir/tests/demote_arg/demote_arg03.ir create mode 100644 sway-ir/tests/demote_arg/demote_arg04.ir create mode 100644 sway-ir/tests/demote_const/demote_const00.ir create mode 100644 sway-ir/tests/demote_const/demote_const01.ir create mode 100644 sway-ir/tests/demote_misc/demote_asm_block_arg.ir create mode 100644 sway-ir/tests/demote_misc/demote_asm_block_ret.ir create mode 100644 sway-ir/tests/demote_misc/demote_log.ir create mode 100644 sway-ir/tests/demote_misc/demote_ptr_to_int.ir create mode 100644 sway-ir/tests/demote_ret/demote_ret00.ir create mode 100644 sway-ir/tests/demote_ret/demote_ret01.ir create mode 100644 sway-ir/tests/memcpyopt/ret_value.ir diff --git a/sway-core/src/asm_generation/evm/evm_asm_builder.rs b/sway-core/src/asm_generation/evm/evm_asm_builder.rs index 6119ba18e62..932c1cd547b 100644 --- a/sway-core/src/asm_generation/evm/evm_asm_builder.rs +++ b/sway-core/src/asm_generation/evm/evm_asm_builder.rs @@ -293,7 +293,6 @@ impl<'ir> EvmAsmBuilder<'ir> { let mut errors = Vec::new(); if let Some(instruction) = instr_val.get_instruction(self.context) { match instruction { - Instruction::AddrOf(arg) => self.compile_addr_of(instr_val, arg), Instruction::AsmBlock(asm, args) => { check!( self.compile_asm_block(instr_val, asm, args), @@ -308,9 +307,7 @@ impl<'ir> EvmAsmBuilder<'ir> { } Instruction::Branch(to_block) => 
self.compile_branch(to_block), Instruction::Call(func, args) => self.compile_call(instr_val, func, args), - Instruction::CastPtr(val, ty, offs) => { - self.compile_cast_ptr(instr_val, val, ty, *offs) - } + Instruction::CastPtr(val, ty) => self.compile_cast_ptr(instr_val, val, ty), Instruction::Cmp(pred, lhs_value, rhs_value) => { self.compile_cmp(instr_val, pred, lhs_value, rhs_value) } @@ -331,14 +328,6 @@ impl<'ir> EvmAsmBuilder<'ir> { gas, .. } => self.compile_contract_call(instr_val, params, coins, asset_id, gas), - Instruction::ExtractElement { - array, - ty, - index_val, - } => self.compile_extract_element(instr_val, array, ty, index_val), - Instruction::ExtractValue { - aggregate, indices, .. - } => self.compile_extract_value(instr_val, aggregate, indices), Instruction::FuelVm(fuel_vm_instr) => { errors.push(CompileError::Internal( "Invalid FuelVM IR instruction provided to the EVM code gen.", @@ -347,19 +336,12 @@ impl<'ir> EvmAsmBuilder<'ir> { .unwrap_or_else(Self::empty_span), )); } - Instruction::GetLocal(local_var) => self.compile_get_local(instr_val, local_var), - Instruction::InsertElement { - array, - ty, - value, - index_val, - } => self.compile_insert_element(instr_val, array, ty, value, index_val), - Instruction::InsertValue { - aggregate, - value, + Instruction::GetElemPtr { + base, + elem_ptr_ty, indices, - .. 
- } => self.compile_insert_value(instr_val, aggregate, value, indices), + } => self.compile_get_elem_ptr(instr_val, base, elem_ptr_ty, indices), + Instruction::GetLocal(local_var) => self.compile_get_local(instr_val, local_var), Instruction::IntToPtr(val, _) => self.compile_int_to_ptr(instr_val, val), Instruction::Load(src_val) => check!( self.compile_load(instr_val, src_val), @@ -367,12 +349,19 @@ impl<'ir> EvmAsmBuilder<'ir> { warnings, errors ), - Instruction::MemCopy { - dst_val, - src_val, + Instruction::MemCopyBytes { + dst_val_ptr, + src_val_ptr, byte_len, - } => self.compile_mem_copy(instr_val, dst_val, src_val, *byte_len), + } => self.compile_mem_copy_bytes(instr_val, dst_val_ptr, src_val_ptr, *byte_len), + Instruction::MemCopyVal { + dst_val_ptr, + src_val_ptr, + } => self.compile_mem_copy_val(instr_val, dst_val_ptr, src_val_ptr), Instruction::Nop => (), + Instruction::PtrToInt(ptr_val, int_ty) => { + self.compile_ptr_to_int(instr_val, ptr_val, int_ty) + } Instruction::Ret(ret_val, ty) => { if func_is_entry { self.compile_ret_from_entry(instr_val, ret_val, ty) @@ -381,7 +370,7 @@ impl<'ir> EvmAsmBuilder<'ir> { } } Instruction::Store { - dst_val, + dst_val_ptr: dst_val, stored_val, } => check!( self.compile_store(instr_val, dst_val, stored_val), @@ -410,10 +399,6 @@ impl<'ir> EvmAsmBuilder<'ir> { todo!(); } - fn compile_addr_of(&mut self, instr_val: &Value, arg: &Value) { - todo!(); - } - fn compile_bitcast(&mut self, instr_val: &Value, bitcast_val: &Value, to_type: &Type) { todo!(); } @@ -432,7 +417,7 @@ impl<'ir> EvmAsmBuilder<'ir> { todo!(); } - fn compile_cast_ptr(&mut self, instr_val: &Value, val: &Value, ty: &Type, offs: u64) { + fn compile_cast_ptr(&mut self, instr_val: &Value, val: &Value, ty: &Type) { todo!(); } @@ -471,72 +456,60 @@ impl<'ir> EvmAsmBuilder<'ir> { todo!(); } - fn compile_extract_element( + fn compile_get_storage_key(&mut self, instr_val: &Value) -> CompileResult<()> { + todo!(); + } + + fn compile_get_elem_ptr( &mut self, 
instr_val: &Value, - array: &Value, - ty: &Type, - index_val: &Value, + base: &Value, + elem_ptr_ty: &Type, + indices: &[Value], ) { todo!(); } - fn compile_extract_value(&mut self, instr_val: &Value, aggregate_val: &Value, indices: &[u64]) { + fn compile_get_local(&mut self, instr_val: &Value, local_var: &LocalVar) { todo!(); } - fn compile_get_storage_key(&mut self, instr_val: &Value) -> CompileResult<()> { + fn compile_gtf(&mut self, instr_val: &Value, index: &Value, tx_field_id: u64) { todo!(); } - fn compile_get_local(&mut self, instr_val: &Value, local_var: &LocalVar) { + fn compile_int_to_ptr(&mut self, instr_val: &Value, int_to_ptr_val: &Value) { todo!(); } - fn compile_gtf(&mut self, instr_val: &Value, index: &Value, tx_field_id: u64) { + fn compile_load(&mut self, instr_val: &Value, src_val: &Value) -> CompileResult<()> { todo!(); } - fn compile_insert_element( - &mut self, - instr_val: &Value, - array: &Value, - ty: &Type, - value: &Value, - index_val: &Value, - ) { + fn compile_log(&mut self, instr_val: &Value, log_val: &Value, log_ty: &Type, log_id: &Value) { todo!(); } - fn compile_insert_value( + fn compile_mem_copy_bytes( &mut self, instr_val: &Value, - aggregate_val: &Value, - value: &Value, - indices: &[u64], + dst_val_ptr: &Value, + src_val_ptr: &Value, + byte_len: u64, ) { todo!(); } - fn compile_int_to_ptr(&mut self, instr_val: &Value, int_to_ptr_val: &Value) { - todo!(); - } - - fn compile_load(&mut self, instr_val: &Value, src_val: &Value) -> CompileResult<()> { - todo!(); - } - - fn compile_mem_copy( + fn compile_mem_copy_val( &mut self, instr_val: &Value, - dst_val: &Value, - src_val: &Value, - byte_len: u64, + dst_val_ptr: &Value, + src_val_ptr: &Value, ) { todo!(); } - fn compile_log(&mut self, instr_val: &Value, log_val: &Value, log_ty: &Type, log_id: &Value) { + fn compile_ptr_to_int(&mut self, instr_val: &Value, ptr_val: &Value, int_ty: &Type) { todo!(); } diff --git a/sway-core/src/asm_generation/from_ir.rs 
b/sway-core/src/asm_generation/from_ir.rs index 864bd7c6397..2a76031bcf6 100644 --- a/sway-core/src/asm_generation/from_ir.rs +++ b/sway-core/src/asm_generation/from_ir.rs @@ -201,7 +201,7 @@ pub enum StateAccessType { pub(crate) fn ir_type_size_in_bytes(context: &Context, ty: &Type) -> u64 { match ty.get_content(context) { - TypeContent::Unit | TypeContent::Bool | TypeContent::Uint(_) => 8, + TypeContent::Unit | TypeContent::Bool | TypeContent::Uint(_) | TypeContent::Pointer(_) => 8, TypeContent::Slice => 16, TypeContent::B256 => 32, TypeContent::String(n) => size_bytes_round_up_to_word_alignment!(*n), @@ -223,47 +223,3 @@ pub(crate) fn ir_type_size_in_bytes(context: &Context, ty: &Type) -> u64 { } } } - -// Aggregate (nested) field offset in words and size in bytes. -pub(crate) fn aggregate_idcs_to_field_layout( - context: &Context, - ty: &Type, - idcs: &[u64], -) -> ((u64, u64), Type) { - idcs.iter().fold(((0, 0), *ty), |((offs, _), ty), idx| { - if ty.is_struct(context) { - let idx = *idx as usize; - let field_types = ty.get_field_types(context); - let field_type = field_types[idx]; - let field_offs_in_bytes = field_types - .iter() - .take(idx) - .map(|field_ty| ir_type_size_in_bytes(context, field_ty)) - .sum::(); - let field_size_in_bytes = ir_type_size_in_bytes(context, &field_type); - - ( - ( - offs + size_bytes_in_words!(field_offs_in_bytes), - field_size_in_bytes, - ), - field_type, - ) - } else if ty.is_union(context) { - let idx = *idx as usize; - let field_type = ty.get_field_types(context)[idx]; - let union_size_in_bytes = ir_type_size_in_bytes(context, &ty); - let field_size_in_bytes = ir_type_size_in_bytes(context, &field_type); - // The union fields are at offset (union_size - variant_size) due to left padding. 
- ( - ( - offs + size_bytes_in_words!(union_size_in_bytes - field_size_in_bytes), - field_size_in_bytes, - ), - field_type, - ) - } else { - panic!("Attempt to access field in non-aggregate.") - } - }) -} diff --git a/sway-core/src/asm_generation/fuel/abstract_instruction_set.rs b/sway-core/src/asm_generation/fuel/abstract_instruction_set.rs index 6ec2dfa9b77..0f9a8e353d7 100644 --- a/sway-core/src/asm_generation/fuel/abstract_instruction_set.rs +++ b/sway-core/src/asm_generation/fuel/abstract_instruction_set.rs @@ -164,7 +164,9 @@ impl AbstractInstructionSet { /// algorithm (https://en.wikipedia.org/wiki/Chaitin%27s_algorithm). The individual steps of /// the algorithm are thoroughly explained in register_allocator.rs. /// - pub(crate) fn allocate_registers(self) -> AllocatedAbstractInstructionSet { + pub(crate) fn allocate_registers( + self, + ) -> Result { // Step 1: Liveness Analysis. let live_out = register_allocator::liveness_analysis(&self.ops); @@ -183,8 +185,21 @@ impl AbstractInstructionSet { // each colorable node and its neighbors. let mut stack = register_allocator::color_interference_graph(&mut interference_graph); + // Uncomment the following to get some idea of which function is failing to complete + // register allocation. The last comment printed will indicate the current function name. + // This will be unnecessary once we have the new register allocator, coming very soon! + // + //let comment = self.ops.iter().find_map(|op| { + // if let Either::Right(crate::asm_lang::ControlFlowOp::Label(_)) = op.opcode { + // Some(op.comment.clone()) + // } else { + // None + // } + //}); + //dbg!(comment); + // Step 5: Use the stack to assign a register for each virtual register. - let pool = register_allocator::assign_registers(&mut stack); + let pool = register_allocator::assign_registers(&mut stack)?; // Step 6: Update all instructions to use the resulting register pool. 
let mut buf = vec![]; @@ -196,7 +211,7 @@ impl AbstractInstructionSet { }) } - AllocatedAbstractInstructionSet { ops: buf } + Ok(AllocatedAbstractInstructionSet { ops: buf }) } } diff --git a/sway-core/src/asm_generation/fuel/fuel_asm_builder.rs b/sway-core/src/asm_generation/fuel/fuel_asm_builder.rs index 86028f4eff1..b9589e884a4 100644 --- a/sway-core/src/asm_generation/fuel/fuel_asm_builder.rs +++ b/sway-core/src/asm_generation/fuel/fuel_asm_builder.rs @@ -1,9 +1,7 @@ use crate::{ asm_generation::{ asm_builder::{AsmBuilder, AsmBuilderResult}, - from_ir::{ - aggregate_idcs_to_field_layout, ir_type_size_in_bytes, StateAccessType, Storage, - }, + from_ir::{ir_type_size_in_bytes, StateAccessType, Storage}, fuel::{ abstract_instruction_set::AbstractInstructionSet, compiler_constants, @@ -17,15 +15,14 @@ use crate::{ error::*, fuel_prelude::fuel_crypto::Hasher, metadata::MetadataManager, - size_bytes_in_words, }; -use sway_error::warning::CompileWarning; -use sway_error::{error::CompileError, warning::Warning}; + +use sway_error::{error::CompileError, warning::CompileWarning, warning::Warning}; use sway_ir::*; use sway_types::{span::Span, Spanned}; use either::Either; -use std::{collections::HashMap, sync::Arc}; +use std::collections::HashMap; pub struct FuelAsmBuilder<'ir> { pub(super) program_kind: ProgramKind, @@ -121,23 +118,6 @@ impl<'ir> FuelAsmBuilder<'ir> { } } - // This is here temporarily for in the case when the IR can't absolutely provide a valid span, - // until we can improve ASM block parsing and verification mostly. It's where it's needed the - // most, for returning failure errors. If we move ASM verification to the parser and semantic - // analysis then ASM block conversion shouldn't/can't fail and we won't need to provide a - // guaranteed to be available span. 
- fn empty_span() -> Span { - let msg = "unknown source location"; - Span::new(Arc::from(msg), 0, msg.len(), None).unwrap() - } - - pub(super) fn insert_block_label(&mut self, block: Block) { - if &block.get_label(self.context) != "entry" { - let label = self.block_to_label(&block); - self.cur_bytecode.push(Op::unowned_jump_label(label)) - } - } - pub fn finalize(&self) -> AsmBuilderResult { AsmBuilderResult::Fuel(( self.data_section.clone(), @@ -162,28 +142,31 @@ impl<'ir> FuelAsmBuilder<'ir> { instr_val: &Value, func_is_entry: bool, ) -> CompileResult<()> { - let mut warnings = Vec::new(); - let mut errors = Vec::new(); - if let Some(instruction) = instr_val.get_instruction(self.context) { + let Some(instruction) = instr_val.get_instruction(self.context) else { + return err(vec![], vec![CompileError::Internal( + "Value not an instruction.", + self.md_mgr + .val_to_span(self.context, *instr_val) + .unwrap_or_else(Span::dummy), + )]); + }; + + // The only instruction whose compilation returns a CompileResult itself is AsmBlock, which + // we special-case here. Ideally, the ASM block verification would happen much sooner, + // perhaps during parsing. https://github.com/FuelLabs/sway/issues/801 + if let Instruction::AsmBlock(asm, args) = instruction { + self.compile_asm_block(instr_val, asm, args) + } else { + // These matches all return `Result<(), CompileError>`. match instruction { - Instruction::AddrOf(arg) => self.compile_addr_of(instr_val, arg), - Instruction::AsmBlock(asm, args) => { - check!( - self.compile_asm_block(instr_val, asm, args), - return err(warnings, errors), - warnings, - errors - ) - } + Instruction::AsmBlock(..) 
=> unreachable!("Handled immediately above."), Instruction::BitCast(val, ty) => self.compile_bitcast(instr_val, val, ty), Instruction::BinaryOp { op, arg1, arg2 } => { self.compile_binary_op(instr_val, op, arg1, arg2) } Instruction::Branch(to_block) => self.compile_branch(to_block), Instruction::Call(func, args) => self.compile_call(instr_val, func, args), - Instruction::CastPtr(val, ty, offs) => { - self.compile_cast_ptr(instr_val, val, ty, *offs) - } + Instruction::CastPtr(val, _ty) => self.compile_no_op_move(instr_val, val), Instruction::Cmp(pred, lhs_value, rhs_value) => { self.compile_cmp(instr_val, pred, lhs_value, rhs_value) } @@ -191,12 +174,7 @@ impl<'ir> FuelAsmBuilder<'ir> { cond_value, true_block, false_block, - } => check!( - self.compile_conditional_branch(cond_value, true_block, false_block), - return err(warnings, errors), - warnings, - errors - ), + } => self.compile_conditional_branch(cond_value, true_block, false_block), Instruction::ContractCall { params, coins, @@ -204,22 +182,9 @@ impl<'ir> FuelAsmBuilder<'ir> { gas, .. } => self.compile_contract_call(instr_val, params, coins, asset_id, gas), - Instruction::ExtractElement { - array, - ty, - index_val, - } => self.compile_extract_element(instr_val, array, ty, index_val), - Instruction::ExtractValue { - aggregate, indices, .. 
- } => self.compile_extract_value(instr_val, aggregate, indices), Instruction::FuelVm(fuel_vm_instr) => match fuel_vm_instr { - FuelVmInstruction::GetStorageKey => { - check!( - self.compile_get_storage_key(instr_val), - return err(warnings, errors), - warnings, - errors - ) + FuelVmInstruction::GetStorageKey(_ty) => { + self.compile_get_storage_key(instr_val) } FuelVmInstruction::Gtf { index, tx_field_id } => { self.compile_gtf(instr_val, index, *tx_field_id) @@ -230,7 +195,8 @@ impl<'ir> FuelAsmBuilder<'ir> { log_id, } => self.compile_log(instr_val, log_val, log_ty, log_id), FuelVmInstruction::ReadRegister(reg) => { - self.compile_read_register(instr_val, reg) + self.compile_read_register(instr_val, reg); + Ok(()) } FuelVmInstruction::Revert(revert_val) => { self.compile_revert(instr_val, revert_val) @@ -250,83 +216,57 @@ impl<'ir> FuelAsmBuilder<'ir> { FuelVmInstruction::StateClear { key, number_of_slots, - } => check!( - self.compile_state_clear(instr_val, key, number_of_slots,), - return err(warnings, errors), - warnings, - errors - ), + } => self.compile_state_clear(instr_val, key, number_of_slots), FuelVmInstruction::StateLoadQuadWord { load_val, key, number_of_slots, - } => check!( - self.compile_state_access_quad_word( - instr_val, - load_val, - key, - number_of_slots, - StateAccessType::Read - ), - return err(warnings, errors), - warnings, - errors - ), - FuelVmInstruction::StateLoadWord(key) => check!( - self.compile_state_load_word(instr_val, key), - return err(warnings, errors), - warnings, - errors + } => self.compile_state_access_quad_word( + instr_val, + load_val, + key, + number_of_slots, + StateAccessType::Read, ), + FuelVmInstruction::StateLoadWord(key) => { + self.compile_state_load_word(instr_val, key) + } FuelVmInstruction::StateStoreQuadWord { stored_val, key, number_of_slots, - } => check!( - self.compile_state_access_quad_word( - instr_val, - stored_val, - key, - number_of_slots, - StateAccessType::Write - ), - return err(warnings, errors), 
- warnings, - errors - ), - FuelVmInstruction::StateStoreWord { stored_val, key } => check!( - self.compile_state_store_word(instr_val, stored_val, key), - return err(warnings, errors), - warnings, - errors + } => self.compile_state_access_quad_word( + instr_val, + stored_val, + key, + number_of_slots, + StateAccessType::Write, ), + FuelVmInstruction::StateStoreWord { stored_val, key } => { + self.compile_state_store_word(instr_val, stored_val, key) + } }, - Instruction::GetLocal(local_var) => self.compile_get_local(instr_val, local_var), - Instruction::InsertElement { - array, - ty, - value, - index_val, - } => self.compile_insert_element(instr_val, array, ty, value, index_val), - Instruction::InsertValue { - aggregate, - value, + Instruction::GetElemPtr { + base, + elem_ptr_ty, indices, - .. - } => self.compile_insert_value(instr_val, aggregate, value, indices), - Instruction::IntToPtr(val, _) => self.compile_int_to_ptr(instr_val, val), - Instruction::Load(src_val) => check!( - self.compile_load(instr_val, src_val), - return err(warnings, errors), - warnings, - errors - ), - Instruction::MemCopy { - dst_val, - src_val, + } => self.compile_get_elem_ptr(instr_val, base, elem_ptr_ty, indices), + Instruction::GetLocal(local_var) => self.compile_get_local(instr_val, local_var), + Instruction::IntToPtr(val, _) => self.compile_no_op_move(instr_val, val), + Instruction::Load(src_val) => self.compile_load(instr_val, src_val), + Instruction::MemCopyBytes { + dst_val_ptr, + src_val_ptr, byte_len, - } => self.compile_mem_copy(instr_val, dst_val, src_val, *byte_len), - Instruction::Nop => (), + } => self.compile_mem_copy_bytes(instr_val, dst_val_ptr, src_val_ptr, *byte_len), + Instruction::MemCopyVal { + dst_val_ptr, + src_val_ptr, + } => self.compile_mem_copy_val(instr_val, dst_val_ptr, src_val_ptr), + Instruction::Nop => Ok(()), + Instruction::PtrToInt(ptr_val, _int_ty) => { + self.compile_no_op_move(instr_val, ptr_val) + } Instruction::Ret(ret_val, ty) => { if 
func_is_entry { self.compile_ret_from_entry(instr_val, ret_val, ty) @@ -335,31 +275,14 @@ impl<'ir> FuelAsmBuilder<'ir> { } } Instruction::Store { - dst_val, + dst_val_ptr, stored_val, - } => check!( - self.compile_store(instr_val, dst_val, stored_val), - return err(warnings, errors), - warnings, - errors - ), + } => self.compile_store(instr_val, dst_val_ptr, stored_val), } - } else { - errors.push(CompileError::Internal( - "Value not an instruction.", - self.md_mgr - .val_to_span(self.context, *instr_val) - .unwrap_or_else(Self::empty_span), - )); + .into() } - ok((), warnings, errors) } - // OK, I began by trying to translate the IR ASM block data structures back into AST data - // structures which I could feed to the code in asm_generation/expression/mod.rs where it - // compiles the inline ASM. But it's more work to do that than to just re-implement that - // algorithm with the IR data here. - fn compile_asm_block( &mut self, instr_val: &Value, @@ -381,7 +304,13 @@ impl<'ir> FuelAsmBuilder<'ir> { ); let arg_reg = match initializer { Some(init_val) => { - let init_val_reg = self.value_to_register(init_val); + let init_val_reg = match self.value_to_register(init_val) { + Ok(ivr) => ivr, + Err(e) => { + errors.push(e); + return err(warnings, errors); + } + }; match init_val_reg { VirtualRegister::Virtual(_) => init_val_reg, VirtualRegister::Constant(_) => { @@ -441,7 +370,7 @@ impl<'ir> FuelAsmBuilder<'ir> { let op_span = self .md_mgr .md_to_span(self.context, op.metadata) - .unwrap_or_else(Self::empty_span); + .unwrap_or_else(Span::dummy); let opcode = check!( Op::parse_opcode( &op.name, @@ -493,13 +422,13 @@ impl<'ir> FuelAsmBuilder<'ir> { ok((), warnings, errors) } - fn compile_addr_of(&mut self, instr_val: &Value, arg: &Value) { - let reg = self.value_to_register(arg); - self.reg_map.insert(*instr_val, reg); - } - - fn compile_bitcast(&mut self, instr_val: &Value, bitcast_val: &Value, to_type: &Type) { - let val_reg = self.value_to_register(bitcast_val); + fn 
compile_bitcast( + &mut self, + instr_val: &Value, + bitcast_val: &Value, + to_type: &Type, + ) -> Result<(), CompileError> { + let val_reg = self.value_to_register(bitcast_val)?; let reg = if to_type.is_bool(self.context) { // This may not be necessary if we just treat a non-zero value as 'true'. let res_reg = self.reg_seqr.next(); @@ -528,6 +457,7 @@ impl<'ir> FuelAsmBuilder<'ir> { val_reg }; self.reg_map.insert(*instr_val, reg); + Ok(()) } fn compile_binary_op( @@ -536,9 +466,9 @@ impl<'ir> FuelAsmBuilder<'ir> { op: &BinaryOpKind, arg1: &Value, arg2: &Value, - ) { - let val1_reg = self.value_to_register(arg1); - let val2_reg = self.value_to_register(arg2); + ) -> Result<(), CompileError> { + let val1_reg = self.value_to_register(arg1)?; + let val2_reg = self.value_to_register(arg2)?; let res_reg = self.reg_seqr.next(); let opcode = match op { BinaryOpKind::Add => Either::Left(VirtualOp::ADD(res_reg.clone(), val1_reg, val2_reg)), @@ -556,62 +486,16 @@ impl<'ir> FuelAsmBuilder<'ir> { }); self.reg_map.insert(*instr_val, res_reg); + Ok(()) } - fn compile_branch(&mut self, to_block: &BranchToWithArgs) { - self.compile_branch_to_phi_value(to_block); + fn compile_branch(&mut self, to_block: &BranchToWithArgs) -> Result<(), CompileError> { + self.compile_branch_to_phi_value(to_block)?; let label = self.block_to_label(&to_block.block); self.cur_bytecode.push(Op::jump_to_label(label)); - } - - fn compile_cast_ptr(&mut self, instr_val: &Value, val: &Value, ty: &Type, offs: u64) { - // `cast_ptr` is replicating the old `get_ptr` functionality of casting between reference - // types and indexing. It will be superceded by proper pointers and GEPs when they arrive. - // In the meantime we still need this for storage writes, etc. - // - // The `val` is guaranteed to be a `get_local` instruction, which will have returned an - // address. All we need to worry about is the indexing, where we increment the address by - // the size of the type multiplied by offs. 
- - let val_reg = self.value_to_register(val); - - if offs == 0 { - // Nothing to do. - self.reg_map.insert(*instr_val, val_reg); - } else { - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - - let ty_size_in_bytes = ir_type_size_in_bytes(self.context, ty); - let offset_in_bytes = ty_size_in_bytes * offs; - let instr_reg = self.reg_seqr.next(); - if offset_in_bytes > compiler_constants::TWELVE_BITS { - self.number_to_reg(offset_in_bytes, &instr_reg, owning_span.clone()); - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADD( - instr_reg.clone(), - self.locals_base_reg().clone(), - instr_reg.clone(), - )), - comment: "get offset for ptr_cast".into(), - owning_span, - }); - } else { - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - instr_reg.clone(), - self.locals_base_reg().clone(), - VirtualImmediate12 { - value: (offset_in_bytes) as u16, - }, - )), - comment: "get offset for ptr_cast".into(), - owning_span, - }); - } - self.reg_map.insert(*instr_val, instr_reg); - } + Ok(()) } fn compile_cmp( @@ -620,9 +504,9 @@ impl<'ir> FuelAsmBuilder<'ir> { pred: &Predicate, lhs_value: &Value, rhs_value: &Value, - ) { - let lhs_reg = self.value_to_register(lhs_value); - let rhs_reg = self.value_to_register(rhs_value); + ) -> Result<(), CompileError> { + let lhs_reg = self.value_to_register(lhs_value)?; + let rhs_reg = self.value_to_register(rhs_value)?; let res_reg = self.reg_seqr.next(); let comment = String::new(); let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); @@ -650,6 +534,7 @@ impl<'ir> FuelAsmBuilder<'ir> { } } self.reg_map.insert(*instr_val, res_reg); + Ok(()) } fn compile_conditional_branch( @@ -657,22 +542,19 @@ impl<'ir> FuelAsmBuilder<'ir> { cond_value: &Value, true_block: &BranchToWithArgs, false_block: &BranchToWithArgs, - ) -> CompileResult<()> { + ) -> Result<(), CompileError> { if true_block.block == false_block.block && true_block.block.num_args(self.context) > 0 { 
- return err( - Vec::new(), - vec![CompileError::Internal( - "Cannot compile CBR with both branches going to same dest block", - self.md_mgr - .val_to_span(self.context, *cond_value) - .unwrap_or_else(Self::empty_span), - )], - ); + return Err(CompileError::Internal( + "Cannot compile CBR with both branches going to same dest block", + self.md_mgr + .val_to_span(self.context, *cond_value) + .unwrap_or_else(Span::dummy), + )); } - self.compile_branch_to_phi_value(true_block); - self.compile_branch_to_phi_value(false_block); + self.compile_branch_to_phi_value(true_block)?; + self.compile_branch_to_phi_value(false_block)?; - let cond_reg = self.value_to_register(cond_value); + let cond_reg = self.value_to_register(cond_value)?; let true_label = self.block_to_label(&true_block.block); self.cur_bytecode @@ -680,15 +562,25 @@ impl<'ir> FuelAsmBuilder<'ir> { let false_label = self.block_to_label(&false_block.block); self.cur_bytecode.push(Op::jump_to_label(false_label)); - ok((), vec![], vec![]) + + Ok(()) } - fn compile_branch_to_phi_value(&mut self, to_block: &BranchToWithArgs) { + fn compile_branch_to_phi_value( + &mut self, + to_block: &BranchToWithArgs, + ) -> Result<(), CompileError> { for (i, param) in to_block.args.iter().enumerate() { // We only need a MOVE here if param is actually assigned to a register - if let Some(local_reg) = self.opt_value_to_register(param) { - let phi_reg = - self.value_to_register(&to_block.block.get_arg(self.context, i).unwrap()); + if let Ok(local_reg) = self.value_to_register(param) { + let phi_val = to_block.block.get_arg(self.context, i).unwrap(); + let phi_reg = self.value_to_register(&phi_val).unwrap_or_else(|_| { + // We must re-use the arg register, but if this is the first time we've seen it + // we add it to the register map now. 
+ let reg = self.reg_seqr.next(); + self.reg_map.insert(phi_val, reg.clone()); + reg + }); self.cur_bytecode.push(Op::register_move( phi_reg, local_reg, @@ -697,6 +589,7 @@ impl<'ir> FuelAsmBuilder<'ir> { )); } } + Ok(()) } #[allow(clippy::too_many_arguments)] @@ -707,11 +600,11 @@ impl<'ir> FuelAsmBuilder<'ir> { coins: &Value, asset_id: &Value, gas: &Value, - ) { - let ra_pointer = self.value_to_register(params); - let coins_register = self.value_to_register(coins); - let asset_id_register = self.value_to_register(asset_id); - let gas_register = self.value_to_register(gas); + ) -> Result<(), CompileError> { + let ra_pointer = self.value_to_register(params)?; + let coins_register = self.value_to_register(coins)?; + let asset_id_register = self.value_to_register(asset_id)?; + let gas_register = self.value_to_register(gas)?; self.cur_bytecode.push(Op { opcode: Either::Left(VirtualOp::CALL( @@ -734,198 +627,10 @@ impl<'ir> FuelAsmBuilder<'ir> { None, )); self.reg_map.insert(*instr_val, instr_reg); + Ok(()) } - fn compile_extract_element( - &mut self, - instr_val: &Value, - array: &Value, - ty: &Type, - index_val: &Value, - ) { - // Base register should pointer to some stack allocated memory. - let base_reg = self.value_to_register(array); - - // Index value is the array element index, not byte nor word offset. - let index_reg = self.value_to_register(index_val); - let rel_offset_reg = self.reg_seqr.next(); - - // We could put the OOB check here, though I'm now thinking it would be too wasteful. - // See compile_bounds_assertion() in expression/array.rs (or look in Git history). 
- - let instr_reg = self.reg_seqr.next(); - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - let elem_type = ty.get_array_elem_type(self.context).unwrap(); - let elem_size = ir_type_size_in_bytes(self.context, &elem_type); - if self.is_copy_type(&elem_type) { - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::MULI( - rel_offset_reg.clone(), - index_reg, - VirtualImmediate12 { value: 8 }, - )), - comment: "extract_element relative offset".into(), - owning_span: owning_span.clone(), - }); - let elem_offs_reg = self.reg_seqr.next(); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::ADD( - elem_offs_reg.clone(), - base_reg, - rel_offset_reg, - )), - comment: "extract_element absolute offset".into(), - owning_span: owning_span.clone(), - }); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::LW( - instr_reg.clone(), - elem_offs_reg, - VirtualImmediate12 { value: 0 }, - )), - comment: "extract_element".into(), - owning_span, - }); - } else { - // Value too big for a register, so we return the memory offset. 
- if elem_size > compiler_constants::TWELVE_BITS { - let size_data_id = self - .data_section - .insert_data_value(Entry::new_word(elem_size, None, None)); - let size_reg = self.reg_seqr.next(); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::LWDataId(size_reg.clone(), size_data_id)), - owning_span: owning_span.clone(), - comment: "loading element size for relative offset".into(), - }); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::MUL(instr_reg.clone(), index_reg, size_reg)), - comment: "extract_element relative offset".into(), - owning_span: owning_span.clone(), - }); - } else { - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::MULI( - instr_reg.clone(), - index_reg, - VirtualImmediate12 { - value: elem_size as u16, - }, - )), - comment: "extract_element relative offset".into(), - owning_span: owning_span.clone(), - }); - } - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::ADD( - instr_reg.clone(), - base_reg, - instr_reg.clone(), - )), - comment: "extract_element absolute offset".into(), - owning_span, - }); - } - - self.reg_map.insert(*instr_val, instr_reg); - } - - fn compile_extract_value(&mut self, instr_val: &Value, aggregate_val: &Value, indices: &[u64]) { - // Base register should pointer to some stack allocated memory. 
- let base_reg = self.value_to_register(aggregate_val); - let ((extract_offset, _), field_type) = aggregate_idcs_to_field_layout( - self.context, - &aggregate_val.get_type(self.context).unwrap(), - indices, - ); - - let instr_reg = self.reg_seqr.next(); - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - if self.is_copy_type(&field_type) { - if extract_offset > compiler_constants::TWELVE_BITS { - let offset_reg = self.reg_seqr.next(); - self.number_to_reg(extract_offset, &offset_reg, owning_span.clone()); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::ADD( - offset_reg.clone(), - base_reg.clone(), - base_reg, - )), - comment: "add array base to offset".into(), - owning_span: owning_span.clone(), - }); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::LW( - instr_reg.clone(), - offset_reg, - VirtualImmediate12 { value: 0 }, - )), - comment: format!( - "extract_value @ {}", - indices - .iter() - .map(|idx| format!("{idx}")) - .collect::>() - .join(",") - ), - owning_span, - }); - } else { - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::LW( - instr_reg.clone(), - base_reg, - VirtualImmediate12 { - value: extract_offset as u16, - }, - )), - comment: format!( - "extract_value @ {}", - indices - .iter() - .map(|idx| format!("{idx}")) - .collect::>() - .join(",") - ), - owning_span, - }); - } - } else { - // Value too big for a register, so we return the memory offset. 
- if extract_offset * 8 > compiler_constants::TWELVE_BITS { - let offset_reg = self.reg_seqr.next(); - self.number_to_reg(extract_offset * 8, &offset_reg, owning_span.clone()); - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADD( - instr_reg.clone(), - base_reg, - offset_reg, - )), - comment: "extract address".into(), - owning_span, - }); - } else { - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - instr_reg.clone(), - base_reg, - VirtualImmediate12 { - value: (extract_offset * 8) as u16, - }, - )), - comment: "extract address".into(), - owning_span, - }); - } - } - - self.reg_map.insert(*instr_val, instr_reg); - } - - fn compile_get_storage_key(&mut self, instr_val: &Value) -> CompileResult<()> { - let warnings: Vec = Vec::new(); - let mut errors: Vec = Vec::new(); - + fn compile_get_storage_key(&mut self, instr_val: &Value) -> Result<(), CompileError> { let state_idx = self.md_mgr.val_to_storage_key(self.context, *instr_val); let instr_span = self.md_mgr.val_to_span(self.context, *instr_val); @@ -938,11 +643,10 @@ impl<'ir> FuelAsmBuilder<'ir> { ) } None => { - errors.push(CompileError::Internal( + return Err(CompileError::Internal( "State index for __get_storage_key is not available as a metadata", - instr_span.unwrap_or_else(Self::empty_span), + instr_span.unwrap_or_else(Span::dummy), )); - return err(warnings, errors); } }; @@ -963,55 +667,228 @@ impl<'ir> FuelAsmBuilder<'ir> { owning_span: instr_span, }); self.reg_map.insert(*instr_val, reg); - ok((), warnings, errors) + + Ok(()) } - fn compile_get_local(&mut self, instr_val: &Value, local_var: &LocalVar) { - // `get_local` is like a `load` except the value isn't dereferenced. 
+ fn compile_get_elem_ptr( + &mut self, + instr_val: &Value, + base_val: &Value, + _elem_ty: &Type, + indices: &[Value], + ) -> Result<(), CompileError> { let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - match self.ptr_map.get(local_var) { - None => unimplemented!("BUG? Uninitialised pointer."), - Some(storage) => match storage.clone() { - Storage::Data(_data_id) => { - // Not sure if we'll ever need this. - unimplemented!("TODO get_ptr() into the data section."); - } - Storage::Stack(word_offs) => { - let offset_in_bytes = word_offs * 8; - let instr_reg = self.reg_seqr.next(); - if offset_in_bytes > compiler_constants::TWELVE_BITS { - self.number_to_reg(offset_in_bytes, &instr_reg, owning_span.clone()); - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADD( - instr_reg.clone(), - self.locals_base_reg().clone(), - instr_reg.clone(), - )), - comment: "get offset reg for get_ptr".into(), - owning_span, - }); + let base_type = base_val + .get_type(self.context) + .and_then(|ty| ty.get_pointee_type(self.context)) + .ok_or_else(|| { + CompileError::Internal( + "Failed to get type of base value for GEP.", + owning_span.as_ref().cloned().unwrap_or_else(Span::dummy), + ) + })?; + + // A utility lambda to unwrap Values which must be constant uints. 
+ let unwrap_constant_uint = |idx_val: &Value| { + idx_val + .get_constant(self.context) + .and_then(|idx_const| { + if let ConstantValue::Uint(idx) = idx_const.value { + Some(idx as usize) } else { - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - instr_reg.clone(), - self.locals_base_reg().clone(), - VirtualImmediate12 { - value: (offset_in_bytes) as u16, - }, - )), - comment: "get offset reg for get_ptr".into(), - owning_span, - }); + None } + }) + .ok_or_else(|| { + CompileError::Internal( + "Failed to convert struct index from constant to integer.", + owning_span.as_ref().cloned().unwrap_or_else(Span::dummy), + ) + }) + }; + + // The indices for a GEP are Values. For structs and unions they are always constant + // uints. For arrays they may be any value expression. So we need to take all the + // individual offsets and add them up. + // + // Ideally, most of the time, there will only be a single constant struct index. And often + // they will be zero, making the GEP a no-op. But if not we need to add the non-constant + // values together. + // + // Eventually this can be optimised with an ASM opt pass which can combine constant + // ADD/ADDIs together. Then we could just emit an ADD for every index at this stage. But + // until then we can keep track of the constant values and add them once. + + let base_reg = self.value_to_register(base_val)?; + let (base_reg, const_offs, _) = + indices + .iter() + .fold(Ok((base_reg, 0, base_type)), |acc, idx_val| { + // So we're folding to a Result, as unwrapping the constants can fail. + acc.and_then(|(reg, offs, elem_ty)| { + // If we find a constant index then we add its offset to `offs`. Otherwise we grab + // its value, which should be compiled already, and add it to reg. + if elem_ty.is_struct(self.context) { + // For structs the index must be a const uint. 
+ unwrap_constant_uint(idx_val).map(|idx| { + let field_types = elem_ty.get_field_types(self.context); + let field_type = field_types[idx]; + let field_offs_in_bytes = field_types + .iter() + .take(idx) + .map(|field_ty| ir_type_size_in_bytes(self.context, field_ty)) + .sum::(); + (reg, offs + field_offs_in_bytes, field_type) + }) + } else if elem_ty.is_union(self.context) { + // For unions the index must also be a const uint. + unwrap_constant_uint(idx_val).map(|idx| { + let field_type = elem_ty.get_field_types(self.context)[idx]; + let union_size_in_bytes = + ir_type_size_in_bytes(self.context, &elem_ty); + let field_size_in_bytes = + ir_type_size_in_bytes(self.context, &field_type); + + // The union fields are at offset (union_size - variant_size) due to left padding. + ( + reg, + offs + union_size_in_bytes - field_size_in_bytes, + field_type, + ) + }) + } else if elem_ty.is_array(self.context) { + // For arrays the index is a value. We need to fetch it and add it to + // the base. + let array_elem_ty = + elem_ty.get_array_elem_type(self.context).ok_or_else(|| { + CompileError::Internal( + "Failed to get elem type for known array", + owning_span.clone().unwrap_or_else(Span::dummy), + ) + })?; + let array_elem_size = + ir_type_size_in_bytes(self.context, &array_elem_ty); + let size_reg = self.reg_seqr.next(); + self.immediate_to_reg( + array_elem_size, + size_reg.clone(), + None, + "get size of element", + owning_span.clone(), + ); + + let index_reg = self.value_to_register(idx_val)?; + let offset_reg = self.reg_seqr.next(); + + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::MUL( + offset_reg.clone(), + index_reg, + size_reg, + )), + comment: "get offset to array element".into(), + owning_span: owning_span.clone(), + }); + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::ADD( + offset_reg.clone(), + reg, + offset_reg.clone(), + )), + comment: "add to array base".into(), + owning_span: owning_span.clone(), + }); + let member_type = + 
elem_ty.get_array_elem_type(self.context).ok_or_else(|| { + CompileError::Internal( + "Can't get array elem type for GEP.", + sway_types::span::Span::dummy(), + ) + })?; + + Ok((offset_reg, offs, member_type)) + } else { + Err(CompileError::Internal( + "Cannot get element offset in non-aggregate.", + sway_types::span::Span::dummy(), + )) + } + }) + })?; + + if const_offs == 0 { + // No need to add anything. + self.reg_map.insert(*instr_val, base_reg); + } else { + let instr_reg = self.reg_seqr.next(); + self.immediate_to_reg( + const_offs, + instr_reg.clone(), + Some(&base_reg), + "get offset to element", + owning_span.clone(), + ); + self.reg_map.insert(*instr_val, instr_reg); + } + + Ok(()) + } + + fn compile_get_local( + &mut self, + instr_val: &Value, + local_var: &LocalVar, + ) -> Result<(), CompileError> { + let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); + match self.ptr_map.get(local_var) { + Some(Storage::Stack(word_offs)) => { + let offset = word_offs * 8; + if offset == 0 { + self.reg_map + .insert(*instr_val, self.locals_base_reg().clone()); + } else { + let instr_reg = self.reg_seqr.next(); + let base_reg = self.locals_base_reg().clone(); + self.immediate_to_reg( + offset, + instr_reg.clone(), + Some(&base_reg), + "get offset to local", + owning_span, + ); self.reg_map.insert(*instr_val, instr_reg); } - }, + Ok(()) + } + Some(Storage::Data(data_id)) => { + let instr_reg = self.reg_seqr.next(); + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::LWDataId(instr_reg.clone(), data_id.clone())), + comment: "get local constant".into(), + owning_span, + }); + self.reg_map.insert(*instr_val, instr_reg); + + Ok(()) + } + _ => Err(CompileError::Internal( + "Malformed storage for local var found.", + self.md_mgr + .val_to_span(self.context, *instr_val) + .unwrap_or_else(Span::dummy), + )), } } - fn compile_gtf(&mut self, instr_val: &Value, index: &Value, tx_field_id: u64) { + fn compile_gtf( + &mut self, + instr_val: &Value, + 
index: &Value, + tx_field_id: u64, + ) -> Result<(), CompileError> { let instr_reg = self.reg_seqr.next(); - let index_reg = self.value_to_register(index); + let index_reg = self.value_to_register(index)?; self.cur_bytecode.push(Op { opcode: either::Either::Left(VirtualOp::GTF( instr_reg.clone(), @@ -1024,332 +901,55 @@ impl<'ir> FuelAsmBuilder<'ir> { owning_span: self.md_mgr.val_to_span(self.context, *instr_val), }); self.reg_map.insert(*instr_val, instr_reg); + Ok(()) } - fn compile_insert_element( - &mut self, - instr_val: &Value, - array: &Value, - ty: &Type, - value: &Value, - index_val: &Value, - ) { - // Base register should point to some stack allocated memory. - let base_reg = self.value_to_register(array); - let insert_reg = self.value_to_register(value); - - // Index value is the array element index, not byte nor word offset. - let index_reg = self.value_to_register(index_val); - let rel_offset_reg = self.reg_seqr.next(); - + fn compile_load(&mut self, instr_val: &Value, src_val: &Value) -> Result<(), CompileError> { let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); + if src_val + .get_type(self.context) + .and_then(|src_ty| src_ty.get_pointee_type(self.context)) + .map_or(true, |inner_ty| !self.is_copy_type(&inner_ty)) + { + Err(CompileError::Internal( + "Attempt to load from non-copy type.", + owning_span.unwrap_or_else(Span::dummy), + )) + } else { + let src_reg = self.value_to_register(src_val)?; + let instr_reg = self.reg_seqr.next(); - let elem_type = ty.get_array_elem_type(self.context).unwrap(); - let elem_size = ir_type_size_in_bytes(self.context, &elem_type); - if self.is_copy_type(&elem_type) { - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::MULI( - rel_offset_reg.clone(), - index_reg, - VirtualImmediate12 { value: 8 }, - )), - comment: "insert_element relative offset".into(), - owning_span: owning_span.clone(), - }); - let elem_offs_reg = self.reg_seqr.next(); - self.cur_bytecode.push(Op { - opcode: 
Either::Left(VirtualOp::ADD( - elem_offs_reg.clone(), - base_reg.clone(), - rel_offset_reg, - )), - comment: "insert_element absolute offset".into(), - owning_span: owning_span.clone(), - }); self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::SW( - elem_offs_reg, - insert_reg, + opcode: Either::Left(VirtualOp::LW( + instr_reg.clone(), + src_reg, VirtualImmediate12 { value: 0 }, )), - comment: "insert_element".into(), + comment: "load value".into(), owning_span, }); - } else { - // Element size is larger than 8; we switch to bytewise offsets and sizes and use MCP. - if elem_size > compiler_constants::TWELVE_BITS { - todo!("array element size bigger than 4k") - } else { - let elem_index_offs_reg = self.reg_seqr.next(); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::MULI( - elem_index_offs_reg.clone(), - index_reg, - VirtualImmediate12 { - value: elem_size as u16, - }, - )), - comment: "insert_element relative offset".into(), - owning_span: owning_span.clone(), - }); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::ADD( - elem_index_offs_reg.clone(), - base_reg.clone(), - elem_index_offs_reg.clone(), - )), - comment: "insert_element absolute offset".into(), - owning_span: owning_span.clone(), - }); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::MCPI( - elem_index_offs_reg, - insert_reg, - VirtualImmediate12 { - value: elem_size as u16, - }, - )), - comment: "insert_element store value".into(), - owning_span, - }); - } - } - // We set the 'instruction' register to the base register, so that cascading inserts will - // work. - self.reg_map.insert(*instr_val, base_reg); - } - - fn compile_insert_value( - &mut self, - instr_val: &Value, - aggregate_val: &Value, - value: &Value, - indices: &[u64], - ) { - // Base register should point to some stack allocated memory. 
- let base_reg = self.value_to_register(aggregate_val); - - let insert_reg = self.value_to_register(value); - let ((mut insert_offs, field_size_in_bytes), field_type) = aggregate_idcs_to_field_layout( - self.context, - &aggregate_val.get_type(self.context).unwrap(), - indices, - ); - - let value_type = value.get_type(self.context).unwrap(); - let value_size_in_bytes = ir_type_size_in_bytes(self.context, &value_type); - let value_size_in_words = size_bytes_in_words!(value_size_in_bytes); - - // Account for the padding if the final field type is a union and the value we're trying to - // insert is smaller than the size of the union (i.e. we're inserting a small variant). - if field_type.is_union(self.context) { - let field_size_in_words = size_bytes_in_words!(field_size_in_bytes); - assert!(field_size_in_words >= value_size_in_words); - insert_offs += field_size_in_words - value_size_in_words; - } - - let indices_str = indices - .iter() - .map(|idx| format!("{idx}")) - .collect::>() - .join(","); - - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - - if self.is_copy_type(&value_type) { - if insert_offs > compiler_constants::TWELVE_BITS { - let insert_offs_reg = self.reg_seqr.next(); - self.number_to_reg(insert_offs, &insert_offs_reg, owning_span.clone()); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::ADD( - base_reg.clone(), - base_reg.clone(), - insert_offs_reg, - )), - comment: "insert_value absolute offset".into(), - owning_span: owning_span.clone(), - }); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::SW( - base_reg.clone(), - insert_reg, - VirtualImmediate12 { value: 0 }, - )), - comment: format!("insert_value @ {indices_str}"), - owning_span, - }); - } else { - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::SW( - base_reg.clone(), - insert_reg, - VirtualImmediate12 { - value: insert_offs as u16, - }, - )), - comment: format!("insert_value @ {indices_str}"), - owning_span, - }); - } - } else 
{ - let offs_reg = self.reg_seqr.next(); - if insert_offs * 8 > compiler_constants::TWELVE_BITS { - self.number_to_reg(insert_offs * 8, &offs_reg, owning_span.clone()); - } else { - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - offs_reg.clone(), - base_reg.clone(), - VirtualImmediate12 { - value: (insert_offs * 8) as u16, - }, - )), - comment: format!("get struct field(s) {indices_str} offset"), - owning_span: owning_span.clone(), - }); - } - if value_size_in_bytes > compiler_constants::TWELVE_BITS { - let size_reg = self.reg_seqr.next(); - self.number_to_reg(value_size_in_bytes, &size_reg, owning_span.clone()); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::MCP(offs_reg, insert_reg, size_reg)), - comment: "store struct field value".into(), - owning_span, - }); - } else { - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::MCPI( - offs_reg, - insert_reg, - VirtualImmediate12 { - value: value_size_in_bytes as u16, - }, - )), - comment: "store struct field value".into(), - owning_span, - }); - } + self.reg_map.insert(*instr_val, instr_reg); + Ok(()) } - - // We set the 'instruction' register to the base register, so that cascading inserts will - // work. - self.reg_map.insert(*instr_val, base_reg); - } - - fn compile_int_to_ptr(&mut self, instr_val: &Value, int_to_ptr_val: &Value) { - let val_reg = self.value_to_register(int_to_ptr_val); - self.reg_map.insert(*instr_val, val_reg); } - fn compile_load(&mut self, instr_val: &Value, src_val: &Value) -> CompileResult<()> { - let local_var = self.resolve_ptr(src_val); - if local_var.value.is_none() { - return local_var.map(|_| ()); - } - let local_var = local_var.value.unwrap().0; - let instr_reg = self.reg_seqr.next(); - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - match self.ptr_map.get(&local_var) { - None => unimplemented!("BUG? 
Uninitialised pointer."), - Some(storage) => match storage.clone() { - Storage::Data(data_id) => { - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::LWDataId(instr_reg.clone(), data_id)), - comment: "load constant".into(), - owning_span, - }); - } - Storage::Stack(word_offs) => { - let base_reg = self.locals_base_reg().clone(); - if self.is_copy_type(&local_var.get_type(self.context)) { - // Value can fit in a register, so we load the value. - if word_offs > compiler_constants::TWELVE_BITS { - let offs_reg = self.reg_seqr.next(); - self.number_to_reg( - word_offs * 8, // Base reg for LW is in bytes - &offs_reg, - owning_span.clone(), - ); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::ADD( - offs_reg.clone(), - base_reg, - offs_reg.clone(), - )), - comment: "absolute offset for load".into(), - owning_span: owning_span.clone(), - }); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::LW( - instr_reg.clone(), - offs_reg.clone(), - VirtualImmediate12 { value: 0 }, - )), - comment: "load value".into(), - owning_span, - }); - } else { - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::LW( - instr_reg.clone(), - base_reg, - VirtualImmediate12 { - value: word_offs as u16, - }, - )), - comment: "load value".into(), - owning_span, - }); - } - } else { - // Value too big for a register, so we return the memory offset. This is - // what LW to the data section does, via LWDataId. 
- let word_offs = word_offs * 8; - if word_offs > compiler_constants::TWELVE_BITS { - let offs_reg = self.reg_seqr.next(); - self.number_to_reg(word_offs, &offs_reg, owning_span.clone()); - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADD( - instr_reg.clone(), - base_reg, - offs_reg, - )), - comment: "load address".into(), - owning_span, - }); - } else { - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - instr_reg.clone(), - base_reg, - VirtualImmediate12 { - value: word_offs as u16, - }, - )), - comment: "load address".into(), - owning_span, - }); - } - } - } - }, - } - self.reg_map.insert(*instr_val, instr_reg); - ok((), Vec::new(), Vec::new()) - } - - fn compile_mem_copy( + fn compile_mem_copy_bytes( &mut self, instr_val: &Value, - dst_val: &Value, - src_val: &Value, + dst_val_ptr: &Value, + src_val_ptr: &Value, byte_len: u64, - ) { + ) -> Result<(), CompileError> { + if byte_len == 0 { + // A zero length MCP will revert. + return Ok(()); + } + let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - let dst_reg = self.value_to_register(dst_val); - let src_reg = self.value_to_register(src_val); + let dst_reg = self.value_to_register(dst_val_ptr)?; + let src_reg = self.value_to_register(src_val_ptr)?; let len_reg = self.reg_seqr.next(); self.cur_bytecode.push(Op { @@ -1368,14 +968,43 @@ impl<'ir> FuelAsmBuilder<'ir> { comment: "copy memory with mem_copy".into(), owning_span, }); + + Ok(()) } - fn compile_log(&mut self, instr_val: &Value, log_val: &Value, log_ty: &Type, log_id: &Value) { + fn compile_mem_copy_val( + &mut self, + instr_val: &Value, + dst_val_ptr: &Value, + src_val_ptr: &Value, + ) -> Result<(), CompileError> { + let dst_ty = dst_val_ptr + .get_type(self.context) + .and_then(|ptr_ty| ptr_ty.get_pointee_type(self.context)) + .ok_or_else(|| { + CompileError::Internal( + "mem_copy dst type must be known and a pointer.", + self.md_mgr + .val_to_span(self.context, *instr_val) + 
.unwrap_or_else(Span::dummy), + ) + })?; + let byte_len = ir_type_size_in_bytes(self.context, &dst_ty); + self.compile_mem_copy_bytes(instr_val, dst_val_ptr, src_val_ptr, byte_len) + } + + fn compile_log( + &mut self, + instr_val: &Value, + log_val: &Value, + log_ty: &Type, + log_id: &Value, + ) -> Result<(), CompileError> { let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - let log_val_reg = self.value_to_register(log_val); - let log_id_reg = self.value_to_register(log_id); + let log_val_reg = self.value_to_register(log_val)?; + let log_id_reg = self.value_to_register(log_id)?; - if self.is_copy_type(log_ty) { + if !log_ty.is_ptr(self.context) { self.cur_bytecode.push(Op { owning_span, opcode: Either::Left(VirtualOp::LOG( @@ -1387,20 +1016,20 @@ impl<'ir> FuelAsmBuilder<'ir> { comment: "".into(), }); } else { - // If the type not a reference type then we use LOGD to log the data. First put the - // size into the data section, then add a LW to get it, then add a LOGD which uses - // it. + // If the type is a pointer then we use LOGD to log the data. First put the size into + // the data section, then add a LW to get it, then add a LOGD which uses it. 
+ let log_ty = log_ty.get_pointee_type(self.context).unwrap(); + let size_in_bytes = ir_type_size_in_bytes(self.context, &log_ty); + let size_reg = self.reg_seqr.next(); - let size_in_bytes = ir_type_size_in_bytes(self.context, log_ty); - let size_data_id = - self.data_section - .insert_data_value(Entry::new_word(size_in_bytes, None, None)); + self.immediate_to_reg( + size_in_bytes, + size_reg.clone(), + None, + "loading size for LOGD", + owning_span.clone(), + ); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::LWDataId(size_reg.clone(), size_data_id)), - owning_span: owning_span.clone(), - comment: "loading size for LOGD".into(), - }); self.cur_bytecode.push(Op { owning_span, opcode: Either::Left(VirtualOp::LOGD( @@ -1412,6 +1041,8 @@ impl<'ir> FuelAsmBuilder<'ir> { comment: "".into(), }); } + + Ok(()) } fn compile_read_register(&mut self, instr_val: &Value, reg: &sway_ir::Register) { @@ -1443,7 +1074,12 @@ impl<'ir> FuelAsmBuilder<'ir> { self.reg_map.insert(*instr_val, instr_reg); } - fn compile_ret_from_entry(&mut self, instr_val: &Value, ret_val: &Value, ret_type: &Type) { + fn compile_ret_from_entry( + &mut self, + instr_val: &Value, + ret_val: &Value, + ret_type: &Type, + ) -> Result<(), CompileError> { let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); if ret_type.is_unit(self.context) { // Unit returns should always be zero, although because they can be omitted from @@ -1457,16 +1093,19 @@ impl<'ir> FuelAsmBuilder<'ir> { comment: "returning unit as zero".into(), }); } else { - let ret_reg = self.value_to_register(ret_val); + let ret_reg = self.value_to_register(ret_val)?; - if self.is_copy_type(ret_type) { + if !ret_type.is_ptr(self.context) && !ret_type.is_slice(self.context) { self.cur_bytecode.push(Op { owning_span, opcode: Either::Left(VirtualOp::RET(ret_reg)), comment: "".into(), }); } else { - // If the type is not a copy type then we use RETD to return data. + // Sometimes (all the time?) 
a slice type will be `ptr slice`. + let ret_type = ret_type.get_pointee_type(self.context).unwrap_or(*ret_type); + + // If the type is a pointer then we use RETD to return data. let size_reg = self.reg_seqr.next(); if ret_type.is_slice(self.context) { // If this is a slice then return what it points to. @@ -1489,20 +1128,17 @@ impl<'ir> FuelAsmBuilder<'ir> { comment: "load ptr of returned slice".into(), }); } else { - // First put the size into the data section, then add a LW to get it, - // then add a RETD which uses it. - let size_in_bytes = ir_type_size_in_bytes(self.context, ret_type); - let size_data_id = self.data_section.insert_data_value(Entry::new_word( + let size_in_bytes = ir_type_size_in_bytes( + self.context, + &ret_type.get_pointee_type(self.context).unwrap_or(ret_type), + ); + self.immediate_to_reg( size_in_bytes, + size_reg.clone(), None, - None, - )); - - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::LWDataId(size_reg.clone(), size_data_id)), - owning_span: owning_span.clone(), - comment: "load size of returned ref".into(), - }); + "get size of returned ref", + owning_span.clone(), + ); } self.cur_bytecode.push(Op { owning_span, @@ -1511,17 +1147,25 @@ impl<'ir> FuelAsmBuilder<'ir> { }); } } + + Ok(()) } - fn compile_revert(&mut self, instr_val: &Value, revert_val: &Value) { + fn compile_revert( + &mut self, + instr_val: &Value, + revert_val: &Value, + ) -> Result<(), CompileError> { let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - let revert_reg = self.value_to_register(revert_val); + let revert_reg = self.value_to_register(revert_val)?; self.cur_bytecode.push(Op { owning_span, opcode: Either::Left(VirtualOp::RVRT(revert_reg)), comment: "".into(), }); + + Ok(()) } fn compile_smo( @@ -1531,12 +1175,12 @@ impl<'ir> FuelAsmBuilder<'ir> { message_size: &Value, output_index: &Value, coins: &Value, - ) { + ) -> Result<(), CompileError> { let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - let 
recipient_and_message_reg = self.value_to_register(recipient_and_message); - let message_size_reg = self.value_to_register(message_size); - let output_index_reg = self.value_to_register(output_index); - let coins_reg = self.value_to_register(coins); + let recipient_and_message_reg = self.value_to_register(recipient_and_message)?; + let message_size_reg = self.value_to_register(message_size)?; + let output_index_reg = self.value_to_register(output_index)?; + let coins_reg = self.value_to_register(coins)?; self.cur_bytecode.push(Op { owning_span, @@ -1548,42 +1192,8 @@ impl<'ir> FuelAsmBuilder<'ir> { )), comment: "".into(), }); - } - fn offset_reg( - &mut self, - base_reg: &VirtualRegister, - offset_in_bytes: u64, - span: Option, - ) -> VirtualRegister { - let offset_reg = self.reg_seqr.next(); - if offset_in_bytes > compiler_constants::TWELVE_BITS { - let offs_reg = self.reg_seqr.next(); - self.number_to_reg(offset_in_bytes, &offs_reg, span.clone()); - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADD( - offset_reg.clone(), - base_reg.clone(), - offs_reg, - )), - comment: "get offset".into(), - owning_span: span, - }); - } else { - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - offset_reg.clone(), - base_reg.clone(), - VirtualImmediate12 { - value: offset_in_bytes as u16, - }, - )), - comment: "get offset".into(), - owning_span: span, - }); - } - - offset_reg + Ok(()) } fn compile_state_clear( @@ -1591,35 +1201,28 @@ impl<'ir> FuelAsmBuilder<'ir> { instr_val: &Value, key: &Value, number_of_slots: &Value, - ) -> CompileResult<()> { - // Make sure that key is a pointer to B256. - assert!(key.get_type(self.context).is(Type::is_b256, self.context)); + ) -> Result<(), CompileError> { let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - let key_var = self.resolve_ptr(key); - if key_var.value.is_none() { - return key_var.map(|_| ()); + // XXX not required after we have FuelVM specific verifier. 
+ if !key + .get_type(self.context) + .map_or(true, |key_ty| key_ty.is_ptr(self.context)) + { + return Err(CompileError::Internal( + "Key value for state clear is not a pointer.", + owning_span.unwrap_or_else(Span::dummy), + )); } - let (key_var, var_ty, offset) = key_var.value.unwrap(); - - // Not expecting an offset here nor a pointer cast - assert!(offset == 0); - assert!(var_ty.is_b256(self.context)); - let key_reg = match self.ptr_map.get(&key_var) { - Some(Storage::Stack(key_offset)) => { - let base_reg = self.locals_base_reg().clone(); - let key_offset_in_bytes = key_offset * 8; - self.offset_reg(&base_reg, key_offset_in_bytes, owning_span.clone()) - } - _ => unreachable!("Unexpected storage locations for key and val"), - }; + // Get the key pointer. + let key_reg = self.value_to_register(key)?; - // capture the status of whether the slot was set before calling this instruction + // Capture the status of whether the slot was set before calling this instruction. let was_slot_set_reg = self.reg_seqr.next(); // Number of slots to be cleared - let number_of_slots_reg = self.value_to_register(number_of_slots); + let number_of_slots_reg = self.value_to_register(number_of_slots)?; self.cur_bytecode.push(Op { opcode: Either::Left(VirtualOp::SCWQ( @@ -1633,7 +1236,7 @@ impl<'ir> FuelAsmBuilder<'ir> { self.reg_map.insert(*instr_val, was_slot_set_reg); - ok((), Vec::new(), Vec::new()) + Ok(()) } fn compile_state_access_quad_word( @@ -1643,63 +1246,28 @@ impl<'ir> FuelAsmBuilder<'ir> { key: &Value, number_of_slots: &Value, access_type: StateAccessType, - ) -> CompileResult<()> { - // Make sure that both val and key are pointers to B256. 
- assert!(val.get_type(self.context).is(Type::is_b256, self.context)); - assert!(key.get_type(self.context).is(Type::is_b256, self.context)); + ) -> Result<(), CompileError> { let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - let key_var = self.resolve_ptr(key); - if key_var.value.is_none() { - return key_var.map(|_| ()); + // Make sure that both val and key are pointers to B256. + // XXX not required after we have FuelVM specific verifier. + if !val + .get_type(self.context) + .and_then(|val_ty| key.get_type(self.context).map(|key_ty| (val_ty, key_ty))) + .map_or(false, |(val_ty, key_ty)| { + val_ty.is_ptr(self.context) && key_ty.is_ptr(self.context) + }) + { + return Err(CompileError::Internal( + "Val or key value for state access quad word is not a pointer.", + owning_span.unwrap_or_else(Span::dummy), + )); } - let (key_var, var_ty, offset) = key_var.value.unwrap(); - - // Not expecting an offset here nor a pointer cast - assert!(offset == 0); - assert!(var_ty.is_b256(self.context)); - - let val_reg = if matches!( - val.get_instruction(self.context), - Some(Instruction::IntToPtr(..)) - ) { - match self.reg_map.get(val) { - Some(vreg) => vreg.clone(), - None => unreachable!("int_to_ptr instruction doesn't have vreg mapped"), - } - } else { - // Expect ptr_ty here to also be b256 and offset to be whatever... 
- let local_val = self.resolve_ptr(val); - if local_val.value.is_none() { - return local_val.map(|_| ()); - } - let (local_val, local_val_ty, _offset) = local_val.value.unwrap(); - // Expect the ptr_ty for val to also be B256 - assert!(local_val_ty.is_b256(self.context)); - match self.ptr_map.get(&local_val) { - Some(Storage::Stack(val_offset)) => { - let base_reg = self.locals_base_reg().clone(); - let val_offset_in_bytes = val_offset * 8; - self.offset_reg(&base_reg, val_offset_in_bytes, owning_span.clone()) - } - _ => unreachable!("Unexpected storage locations for key and val"), - } - }; - let key_reg = match self.ptr_map.get(&key_var) { - Some(Storage::Stack(key_offset)) => { - let base_reg = self.locals_base_reg().clone(); - let key_offset_in_bytes = key_offset * 8; - self.offset_reg(&base_reg, key_offset_in_bytes, owning_span.clone()) - } - _ => unreachable!("Unexpected storage locations for key and val"), - }; - - // capture the status of whether the slot was set before calling this instruction + let val_reg = self.value_to_register(val)?; + let key_reg = self.value_to_register(key)?; let was_slot_set_reg = self.reg_seqr.next(); - - // Number of slots to be read or written - let number_of_slots_reg = self.value_to_register(number_of_slots); + let number_of_slots_reg = self.value_to_register(number_of_slots)?; self.cur_bytecode.push(Op { opcode: Either::Left(match access_type { @@ -1722,51 +1290,40 @@ impl<'ir> FuelAsmBuilder<'ir> { self.reg_map.insert(*instr_val, was_slot_set_reg); - ok((), Vec::new(), Vec::new()) + Ok(()) } - fn compile_state_load_word(&mut self, instr_val: &Value, key: &Value) -> CompileResult<()> { - // Make sure that the key is a pointers to B256. 
- assert!(key.get_type(self.context).is(Type::is_b256, self.context)); + fn compile_state_load_word( + &mut self, + instr_val: &Value, + key: &Value, + ) -> Result<(), CompileError> { + let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - let key_var = self.resolve_ptr(key); - if key_var.value.is_none() { - return key_var.map(|_| ()); + // XXX not required after we have FuelVM specific verifier. + if !key + .get_type(self.context) + .map_or(true, |key_ty| key_ty.is_ptr(self.context)) + { + return Err(CompileError::Internal( + "Key value for state load word is not a pointer.", + owning_span.unwrap_or_else(Span::dummy), + )); } - let (key_var, var_ty, offset) = key_var.value.unwrap(); - - // Not expecting an offset here nor a pointer cast - assert!(offset == 0); - assert!(var_ty.is_b256(self.context)); - - let load_reg = self.reg_seqr.next(); - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - // capture the status of whether the slot was set before calling this instruction + let key_reg = self.value_to_register(key)?; let was_slot_set_reg = self.reg_seqr.next(); + let load_reg = self.reg_seqr.next(); - match self.ptr_map.get(&key_var) { - Some(Storage::Stack(key_offset)) => { - let base_reg = self.locals_base_reg().clone(); - let key_offset_in_bytes = key_offset * 8; - - let key_reg = self.offset_reg(&base_reg, key_offset_in_bytes, owning_span.clone()); - - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::SRW( - load_reg.clone(), - was_slot_set_reg, - key_reg, - )), - comment: "single word state access".into(), - owning_span, - }); - } - _ => unreachable!("Unexpected storage location for key"), - } + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::SRW(load_reg.clone(), was_slot_set_reg, key_reg)), + comment: "single word state access".into(), + owning_span, + }); self.reg_map.insert(*instr_val, load_reg); - ok((), Vec::new(), Vec::new()) + + Ok(()) } fn compile_state_store_word( @@ -1774,54 +1331,36 
@@ impl<'ir> FuelAsmBuilder<'ir> { instr_val: &Value, store_val: &Value, key: &Value, - ) -> CompileResult<()> { - // Make sure that key is a pointer to B256. - assert!(key.get_type(self.context).is(Type::is_b256, self.context)); + ) -> Result<(), CompileError> { + let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - // Make sure that store_val is a U64 value. - assert!(store_val + // XXX not required after we have FuelVM specific verifier. + if !store_val .get_type(self.context) - .is(Type::is_uint64, self.context)); - let store_reg = self.value_to_register(store_val); - - // Expect the get_ptr here to have type b256 and offset = 0??? - let key_var = self.resolve_ptr(key); - if key_var.value.is_none() { - return key_var.map(|_| ()); + .and_then(|val_ty| key.get_type(self.context).map(|key_ty| (val_ty, key_ty))) + .map_or(false, |(val_ty, key_ty)| { + val_ty.is_uint64(self.context) && key_ty.is_ptr(self.context) + }) + { + return Err(CompileError::Internal( + "Val or key value for state store word is not a pointer.", + owning_span.unwrap_or_else(Span::dummy), + )); } - let (key_var, key_var_ty, offset) = key_var.value.unwrap(); - // capture the status of whether the slot was set before calling this instruction + let store_reg = self.value_to_register(store_val)?; + let key_reg = self.value_to_register(key)?; let was_slot_set_reg = self.reg_seqr.next(); - // Not expecting an offset here nor a pointer cast - assert!(offset == 0); - assert!(key_var_ty.is_b256(self.context)); - - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - match self.ptr_map.get(&key_var) { - Some(Storage::Stack(key_offset)) => { - let base_reg = self.locals_base_reg().clone(); - let key_offset_in_bytes = key_offset * 8; - - let key_reg = self.offset_reg(&base_reg, key_offset_in_bytes, owning_span.clone()); - - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::SWW( - key_reg, - was_slot_set_reg.clone(), - store_reg, - )), - comment: "single word 
state access".into(), - owning_span, - }); - } - _ => unreachable!("Unexpected storage locations for key and store_val"), - } + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::SWW(key_reg, was_slot_set_reg.clone(), store_reg)), + comment: "single word state access".into(), + owning_span, + }); self.reg_map.insert(*instr_val, was_slot_set_reg); - ok((), Vec::new(), Vec::new()) + Ok(()) } fn compile_store( @@ -1829,189 +1368,59 @@ impl<'ir> FuelAsmBuilder<'ir> { instr_val: &Value, dst_val: &Value, stored_val: &Value, - ) -> CompileResult<()> { - let local_var = self.resolve_ptr(dst_val); - if local_var.value.is_none() { - return local_var.map(|_| ()); - } - let local_var = local_var.value.unwrap().0; - let stored_reg = self.value_to_register(stored_val); + ) -> Result<(), CompileError> { let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - match self.ptr_map.get(&local_var) { - None => unreachable!("Bug! Trying to store to an unknown pointer."), - Some(storage) => match storage { - Storage::Data(_) => unreachable!("BUG! Trying to store to the data section."), - Storage::Stack(word_offs) => { - let word_offs = *word_offs; - let store_type = local_var.get_type(self.context); - let store_size_in_words = - size_bytes_in_words!(ir_type_size_in_bytes(self.context, &store_type)); - if self.is_copy_type(&store_type) { - let base_reg = self.locals_base_reg().clone(); - - // A single word can be stored with SW. - let local_var_ty = local_var.get_type(self.context); - let is_aggregate_var = local_var_ty.is_array(self.context) - || local_var_ty.is_struct(self.context) - || local_var_ty.is_union(self.context); - - let stored_reg = if !is_aggregate_var { - // stored_reg is a value. - stored_reg - } else { - // stored_reg is a pointer, even though size is 1. We need to load it. 
- let tmp_reg = self.reg_seqr.next(); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::LW( - tmp_reg.clone(), - stored_reg, - VirtualImmediate12 { value: 0 }, - )), - comment: "load for store".into(), - owning_span: owning_span.clone(), - }); - tmp_reg - }; - if word_offs > compiler_constants::TWELVE_BITS { - let offs_reg = self.reg_seqr.next(); - self.number_to_reg( - word_offs * 8, // Base reg for SW is in bytes - &offs_reg, - owning_span.clone(), - ); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::ADD( - offs_reg.clone(), - base_reg, - offs_reg.clone(), - )), - comment: "store absolute offset".into(), - owning_span: owning_span.clone(), - }); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::SW( - offs_reg, - stored_reg, - VirtualImmediate12 { value: 0 }, - )), - comment: "store value".into(), - owning_span, - }); - } else { - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::SW( - base_reg, - stored_reg, - VirtualImmediate12 { - value: word_offs as u16, - }, - )), - comment: "store value".into(), - owning_span, - }); - } - } else { - let base_reg = self.locals_base_reg().clone(); + if stored_val + .get_type(self.context) + .map_or(true, |ty| !self.is_copy_type(&ty)) + { + // NOTE: Very hacky special case here which must be fixed. We've been given a + // configurable constant which doesn't have a pointer type and shouldn't still be using + // `store`. + if stored_val.is_configurable(self.context) { + // So we know it's not a copy type so we actually need a MCP. + self.compile_mem_copy_val(instr_val, dst_val, stored_val) + } else { + Err(CompileError::Internal( + "Attempt to store a non-copy type.", + owning_span.unwrap_or_else(Span::dummy), + )) + } + } else { + let dst_reg = self.value_to_register(dst_val)?; + let val_reg = self.value_to_register(stored_val)?; - // Bigger than 1 word needs a MCPI. XXX Or MCP if it's huge. 
- let dest_offs_reg = self.reg_seqr.next(); - if word_offs * 8 > compiler_constants::TWELVE_BITS { - self.number_to_reg(word_offs * 8, &dest_offs_reg, owning_span.clone()); - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADD( - dest_offs_reg.clone(), - base_reg, - dest_offs_reg.clone(), - )), - comment: "get store offset".into(), - owning_span: owning_span.clone(), - }); - } else { - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - dest_offs_reg.clone(), - base_reg, - VirtualImmediate12 { - value: (word_offs * 8) as u16, - }, - )), - comment: "get store offset".into(), - owning_span: owning_span.clone(), - }); - } + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::SW( + dst_reg, + val_reg, + VirtualImmediate12 { value: 0 }, + )), + comment: "store value".into(), + owning_span, + }); - if store_size_in_words * 8 > compiler_constants::TWELVE_BITS { - let size_reg = self.reg_seqr.next(); - self.number_to_reg( - store_size_in_words * 8, - &size_reg, - owning_span.clone(), - ); - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::MCP( - dest_offs_reg, - stored_reg, - size_reg, - )), - comment: "store value".into(), - owning_span, - }); - } else { - self.cur_bytecode.push(Op { - opcode: Either::Left(VirtualOp::MCPI( - dest_offs_reg, - stored_reg, - VirtualImmediate12 { - value: (store_size_in_words * 8) as u16, - }, - )), - comment: "store value".into(), - owning_span, - }); - } - } - } - }, - }; - ok((), Vec::new(), Vec::new()) + Ok(()) + } } - pub(crate) fn is_copy_type(&self, ty: &Type) -> bool { - ty.is_unit(self.context) || ty.is_bool(self.context) | ty.is_uint(self.context) + fn compile_no_op_move( + &mut self, + instr_val: &Value, + rhs_val: &Value, + ) -> Result<(), CompileError> { + // For cast_ptr, int_to_ptr, ptr_to_int, etc. these are NOPs and just need updates to the + // register map. 
+ self.value_to_register(rhs_val).map(|val_reg| { + self.reg_map.insert(*instr_val, val_reg); + }) } - fn resolve_ptr(&mut self, ptr_val: &Value) -> CompileResult<(LocalVar, Type, u64)> { - let mut warnings = Vec::new(); - let mut errors = Vec::new(); - match ptr_val.get_instruction(self.context) { - // Return the local variable with its type and an offset of 0. - Some(Instruction::GetLocal(local_var)) => ok( - (*local_var, local_var.get_type(self.context), 0), - warnings, - errors, - ), - - // Recurse to find the local variable but override the type and offset. - Some(Instruction::CastPtr(local_val, ty, offs)) => { - let var = check!( - self.resolve_ptr(local_val), - return err(warnings, errors), - warnings, - errors - ); - ok((var.0, *ty, *offs), warnings, errors) - } + // --------------------------------------------------------------------------------------------- - _otherwise => { - errors.push(CompileError::Internal( - "Destination arg for load/store is not valid.", - self.md_mgr - .val_to_span(self.context, *ptr_val) - .unwrap_or_else(Self::empty_span), - )); - err(warnings, errors) - } - } + // XXX reassess all the places we use this + pub(crate) fn is_copy_type(&self, ty: &Type) -> bool { + ty.is_unit(self.context) || ty.is_bool(self.context) || ty.is_uint(self.context) } fn initialise_constant( @@ -2065,9 +1474,12 @@ impl<'ir> FuelAsmBuilder<'ir> { // to determine when it may be initialised and/or reused. } - // Get the reg corresponding to `value`. Returns None if the value is not in reg_map or is not - // a constant. - fn opt_value_to_register(&mut self, value: &Value) -> Option { + // Get the reg corresponding to `value`. Returns an ICE if the value is not in reg_map or is + // not a constant. 
+ pub(super) fn value_to_register( + &mut self, + value: &Value, + ) -> Result { self.reg_map .get(value) .cloned() @@ -2094,64 +1506,73 @@ impl<'ir> FuelAsmBuilder<'ir> { initialized.0 }) }) + .ok_or_else(|| { + CompileError::Internal( + "An attempt to get register for unknown Value.", + Span::dummy(), + ) + }) } - /// Same as [`opt_value_to_register`] but returns a new register if no register is found or if - /// `value` is not a constant. - pub(super) fn value_to_register(&mut self, value: &Value) -> VirtualRegister { - match self.opt_value_to_register(value) { - Some(reg) => reg, - None => { - // Just make a new register for this value. - let reg = self.reg_seqr.next(); - self.reg_map.insert(*value, reg.clone()); - reg - } - } - } - - pub(super) fn number_to_reg( + pub(super) fn immediate_to_reg>( &mut self, - offset: u64, - offset_reg: &VirtualRegister, + imm: u64, + reg: VirtualRegister, + base: Option<&VirtualRegister>, + comment: S, span: Option, ) { - if offset > compiler_constants::TWENTY_FOUR_BITS { - todo!("Absolutely giant arrays."); + // We have a few different options here. + // - If we're given a base to add to and the immediate is small enough we can use ADDI. + // - If the immediate is too big for that then we need to MOVI and ADD. + // - If the immediate is very big then we LW and ADD. + // XXX This can be done with peephole optimisations when we get them. 
+ if imm <= compiler_constants::TWELVE_BITS && base.is_some() { + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::ADDI( + reg, + #[allow(clippy::unnecessary_unwrap)] + base.unwrap().clone(), + VirtualImmediate12 { value: imm as u16 }, + )), + comment: comment.into(), + owning_span: span, + }); + } else if imm <= compiler_constants::EIGHTEEN_BITS { + let comment = comment.into(); + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::MOVI( + reg.clone(), + VirtualImmediate18 { value: imm as u32 }, + )), + comment: comment.clone(), + owning_span: span.clone(), + }); + if let Some(base_reg) = base { + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::ADD(reg.clone(), base_reg.clone(), reg)), + comment, + owning_span: span, + }); + } + } else { + let comment = comment.into(); + let data_id = self + .data_section + .insert_data_value(Entry::new_word(imm, None, None)); + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::LWDataId(reg.clone(), data_id)), + owning_span: span.clone(), + comment: comment.clone(), + }); + if let Some(base_reg) = base { + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::ADD(reg.clone(), base_reg.clone(), reg)), + comment, + owning_span: span, + }); + } } - - // Use bitwise ORs and SHIFTs to crate a 24 bit value in a register. 
- self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ORI( - offset_reg.clone(), - VirtualRegister::Constant(ConstantRegister::Zero), - VirtualImmediate12 { - value: (offset >> 12) as u16, - }, - )), - comment: "get extract offset high bits".into(), - owning_span: span.clone(), - }); - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::SLLI( - offset_reg.clone(), - offset_reg.clone(), - VirtualImmediate12 { value: 12 }, - )), - comment: "shift extract offset high bits".into(), - owning_span: span.clone(), - }); - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ORI( - offset_reg.clone(), - offset_reg.clone(), - VirtualImmediate12 { - value: (offset & 0xfff) as u16, - }, - )), - comment: "get extract offset low bits".into(), - owning_span: span, - }); } pub(super) fn func_to_labels(&mut self, func: &Function) -> (Label, Label) { @@ -2162,7 +1583,7 @@ impl<'ir> FuelAsmBuilder<'ir> { }) } - fn block_to_label(&mut self, block: &Block) -> Label { + pub(super) fn block_to_label(&mut self, block: &Block) -> Label { self.block_label_map.get(block).cloned().unwrap_or_else(|| { let label = self.reg_seqr.get_label(); self.block_label_map.insert(*block, label); diff --git a/sway-core/src/asm_generation/fuel/functions.rs b/sway-core/src/asm_generation/fuel/functions.rs index 40b5de09f9f..32ec9f005b7 100644 --- a/sway-core/src/asm_generation/fuel/functions.rs +++ b/sway-core/src/asm_generation/fuel/functions.rs @@ -17,6 +17,7 @@ use crate::{ use sway_ir::*; use either::Either; +use sway_error::error::CompileError; use sway_types::Ident; /// A summary of the adopted calling convention: @@ -53,11 +54,16 @@ use sway_types::Ident; /// - Jump to the return address. 
impl<'ir> FuelAsmBuilder<'ir> { - pub(super) fn compile_call(&mut self, instr_val: &Value, function: &Function, args: &[Value]) { + pub(super) fn compile_call( + &mut self, + instr_val: &Value, + function: &Function, + args: &[Value], + ) -> Result<(), CompileError> { // Put the args into the args registers. for (idx, arg_val) in args.iter().enumerate() { if idx < compiler_constants::NUM_ARG_REGISTERS as usize { - let arg_reg = self.value_to_register(arg_val); + let arg_reg = self.value_to_register(arg_val)?; self.cur_bytecode.push(Op::register_move( VirtualRegister::Constant(ConstantRegister::ARG_REGS[idx]), arg_reg, @@ -101,12 +107,18 @@ impl<'ir> FuelAsmBuilder<'ir> { owning_span: None, }); self.reg_map.insert(*instr_val, ret_reg); + + Ok(()) } - pub(super) fn compile_ret_from_call(&mut self, instr_val: &Value, ret_val: &Value) { + pub(super) fn compile_ret_from_call( + &mut self, + instr_val: &Value, + ret_val: &Value, + ) -> Result<(), CompileError> { // Move the result into the return value register. let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - let ret_reg = self.value_to_register(ret_val); + let ret_reg = self.value_to_register(ret_val)?; self.cur_bytecode.push(Op::register_move( VirtualRegister::Constant(ConstantRegister::CallReturnValue), ret_reg, @@ -121,6 +133,8 @@ impl<'ir> FuelAsmBuilder<'ir> { .expect("Calls guaranteed to save return context.") .0; self.cur_bytecode.push(Op::jump_to_label(end_label)); + + Ok(()) } pub fn compile_function(&mut self, function: Function) -> CompileResult<()> { @@ -177,8 +191,12 @@ impl<'ir> FuelAsmBuilder<'ir> { }); } + let mut warnings = Vec::new(); + let mut errors = Vec::new(); + if func_is_entry { - self.compile_external_args(function) + let result = Into::>::into(self.compile_external_args(function)); + check!(result, return err(warnings, errors), warnings, errors); } else { // Make copies of the arg registers. 
self.compile_fn_call_args(function) @@ -207,15 +225,13 @@ impl<'ir> FuelAsmBuilder<'ir> { self.init_locals(function); - // Compile instructions. - let mut warnings = Vec::new(); - let mut errors = Vec::new(); - - // Traverse the IR blocks in reverse post order. This guarantees that each block is - // processed after all its CFG predecessors have been processed. + // Compile instructions. Traverse the IR blocks in reverse post order. This guarantees that + // each block is processed after all its CFG predecessors have been processed. let po = sway_ir::dominator::compute_post_order(self.context, &function); for block in po.po_to_block.iter().rev() { - self.insert_block_label(*block); + let label = self.block_to_label(block); + self.cur_bytecode.push(Op::unowned_jump_label(label)); + for instr_val in block.instruction_iter(self.context) { check!( self.compile_instruction(&instr_val, func_is_entry), @@ -298,19 +314,21 @@ impl<'ir> FuelAsmBuilder<'ir> { } // Handle loading the arguments of a contract call - fn compile_external_args(&mut self, function: Function) { + fn compile_external_args(&mut self, function: Function) -> Result<(), CompileError> { match function.args_iter(self.context).count() { // Nothing to do if there are no arguments - 0 => (), + 0 => Ok(()), // A special case for when there's only a single arg, its value (or address) is placed // directly in the base register. 
1 => { let (_, val) = function.args_iter(self.context).next().unwrap(); - let single_arg_reg = self.value_to_register(val); + let single_arg_reg = self.reg_seqr.next(); match self.program_kind { - ProgramKind::Contract => self.read_args_base_from_frame(&single_arg_reg), - ProgramKind::Library => (), // Nothing to do here + ProgramKind::Contract => { + self.read_args_base_from_frame(&single_arg_reg); + } + ProgramKind::Library => {} // Nothing to do here ProgramKind::Script | ProgramKind::Predicate => { if let ProgramKind::Predicate = self.program_kind { self.read_args_base_from_predicate_data(&single_arg_reg); @@ -319,6 +337,7 @@ impl<'ir> FuelAsmBuilder<'ir> { } // The base is an offset. Dereference it. + // XXX val.get_type() should be a pointer if it's not meant to be loaded. if val .get_type(self.context) .map_or(false, |t| self.is_copy_type(&t)) @@ -335,6 +354,8 @@ impl<'ir> FuelAsmBuilder<'ir> { } } } + self.reg_map.insert(*val, single_arg_reg); + Ok(()) } // Otherwise, the args are bundled together and pointed to by the base register. @@ -342,7 +363,7 @@ impl<'ir> FuelAsmBuilder<'ir> { let args_base_reg = self.reg_seqr.next(); match self.program_kind { ProgramKind::Contract => self.read_args_base_from_frame(&args_base_reg), - ProgramKind::Library => return, // Nothing to do here + ProgramKind::Library => return Ok(()), // Nothing to do here ProgramKind::Predicate => { self.read_args_base_from_predicate_data(&args_base_reg) } @@ -353,8 +374,14 @@ impl<'ir> FuelAsmBuilder<'ir> { // and whether the offset fits in a 12-bit immediate. let mut arg_word_offset = 0; for (name, val) in function.args_iter(self.context) { - let current_arg_reg = self.value_to_register(val); - let arg_type = val.get_type(self.context).unwrap(); + let current_arg_reg = self.reg_seqr.next(); + + // The function arg type might be a pointer, but the value in the struct will + // be of the pointed to type. So strip the pointer if necessary. 
+ let arg_type = val + .get_type(self.context) + .map(|ty| ty.get_pointee_type(self.context).unwrap_or(ty)) + .unwrap(); let arg_type_size_bytes = ir_type_size_in_bytes(self.context, &arg_type); if self.is_copy_type(&arg_type) { if arg_word_offset > compiler_constants::TWELVE_BITS { @@ -390,34 +417,21 @@ impl<'ir> FuelAsmBuilder<'ir> { owning_span: None, }); } - } else if arg_word_offset * 8 > compiler_constants::TWELVE_BITS { - let offs_reg = self.reg_seqr.next(); - self.number_to_reg(arg_word_offset * 8, &offs_reg, None); - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADD( - current_arg_reg.clone(), - args_base_reg.clone(), - offs_reg, - )), - comment: format!("get offset or arg {name}"), - owning_span: None, - }); } else { - self.cur_bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - current_arg_reg.clone(), - args_base_reg.clone(), - VirtualImmediate12 { - value: (arg_word_offset * 8) as u16, - }, - )), - comment: format!("get address for arg {name}"), - owning_span: None, - }); + self.immediate_to_reg( + arg_word_offset * 8, + current_arg_reg.clone(), + Some(&args_base_reg), + format!("get offset or arg {name}"), + None, + ); } arg_word_offset += size_bytes_in_words!(arg_type_size_bytes); + self.reg_map.insert(*val, current_arg_reg); } + + Ok(()) } } } @@ -596,51 +610,43 @@ impl<'ir> FuelAsmBuilder<'ir> { fn init_locals(&mut self, function: Function) { // If they're immutable and have a constant initialiser then they go in the data section. + // XXX Everything is mutable in the IR now. + // // Otherwise they go in runtime allocated space, either a register or on the stack. // // Stack offsets are in words to both enforce alignment and simplify use with LW/SW. 
- let mut stack_base = 0_u64; - for (_name, ptr) in function.locals_iter(self.context) { - if let Some(constant) = ptr.get_initializer(self.context) { - let data_id = self.data_section.insert_data_value(Entry::from_constant( - self.context, - constant, - None, - )); - self.ptr_map.insert(*ptr, Storage::Data(data_id)); - } else { - let ptr_ty = ptr.get_type(self.context); - match ptr_ty.get_content(self.context) { - TypeContent::Unit | TypeContent::Bool | TypeContent::Uint(_) => { - self.ptr_map.insert(*ptr, Storage::Stack(stack_base)); - stack_base += 1; - } - TypeContent::Slice => { - self.ptr_map.insert(*ptr, Storage::Stack(stack_base)); - stack_base += 2; - } - TypeContent::B256 => { - // XXX Like strings, should we just reserve space for a pointer? - self.ptr_map.insert(*ptr, Storage::Stack(stack_base)); - stack_base += 4; - } - TypeContent::String(n) => { - // Strings are always constant and used by reference, so we only store the - // pointer on the stack. - self.ptr_map.insert(*ptr, Storage::Stack(stack_base)); - stack_base += size_bytes_round_up_to_word_alignment!(n) - } - TypeContent::Array(..) | TypeContent::Struct(_) | TypeContent::Union(_) => { - // Store this aggregate at the current stack base. - self.ptr_map.insert(*ptr, Storage::Stack(stack_base)); - - // Reserve space by incrementing the base. 
- stack_base += - size_bytes_in_words!(ir_type_size_in_bytes(self.context, &ptr_ty)); - } - }; - } - } + let stack_base = function + .locals_iter(self.context) + .fold(0, |stack_base, (_name, ptr)| { + if let Some(constant) = ptr.get_initializer(self.context) { + let data_id = self.data_section.insert_data_value(Entry::from_constant( + self.context, + constant, + None, + )); + self.ptr_map.insert(*ptr, Storage::Data(data_id)); + stack_base + } else { + self.ptr_map.insert(*ptr, Storage::Stack(stack_base)); + + let ptr_ty = ptr.get_inner_type(self.context); + stack_base + + match ptr_ty.get_content(self.context) { + TypeContent::Unit + | TypeContent::Bool + | TypeContent::Uint(_) + | TypeContent::Pointer(_) => 1, + TypeContent::Slice => 2, + TypeContent::B256 => 4, + TypeContent::String(n) => size_bytes_round_up_to_word_alignment!(n), + TypeContent::Array(..) + | TypeContent::Struct(_) + | TypeContent::Union(_) => { + size_bytes_in_words!(ir_type_size_in_bytes(self.context, &ptr_ty)) + } + } + } + }); // Reserve space on the stack (in bytes) for all our locals which require it. Firstly save // the current $sp. 
diff --git a/sway-core/src/asm_generation/fuel/register_allocator.rs b/sway-core/src/asm_generation/fuel/register_allocator.rs index 81e9069da08..770a6a64832 100644 --- a/sway-core/src/asm_generation/fuel/register_allocator.rs +++ b/sway-core/src/asm_generation/fuel/register_allocator.rs @@ -9,6 +9,8 @@ use either::Either; use petgraph::graph::{node_index, NodeIndex}; use rustc_hash::FxHashSet; use std::collections::{BTreeSet, HashMap}; +use sway_error::error::CompileError; +use sway_types::span::Span; pub type InterferenceGraph = petgraph::stable_graph::StableGraph, (), petgraph::Undirected>; @@ -438,7 +440,7 @@ pub(crate) fn color_interference_graph( /// pub(crate) fn assign_registers( stack: &mut Vec<(VirtualRegister, BTreeSet)>, -) -> RegisterPool { +) -> Result { let mut pool = RegisterPool::init(); while let Some((reg, neighbors)) = stack.pop() { if matches!(reg, VirtualRegister::Virtual(_)) { @@ -453,14 +455,15 @@ pub(crate) fn assign_registers( used_by.insert(reg.clone()); } else { // Error out for now if no available register is found - unimplemented!( + return Err(CompileError::Internal( "The allocator cannot resolve a register mapping for this program. \ - This is a temporary artifact of the extremely early stage version \ - of this language. Try to lower the number of variables you use." - ); + This is a temporary artifact of the early stage version of this \ + compiler. 
Using #[inline(never)] on some functions may help.", + Span::dummy(), + )); } } } - pool + Ok(pool) } diff --git a/sway-core/src/asm_generation/miden_vm/miden_vm_asm_builder.rs b/sway-core/src/asm_generation/miden_vm/miden_vm_asm_builder.rs index 913d5bbd572..def18edb9e5 100644 --- a/sway-core/src/asm_generation/miden_vm/miden_vm_asm_builder.rs +++ b/sway-core/src/asm_generation/miden_vm/miden_vm_asm_builder.rs @@ -273,7 +273,6 @@ impl<'ir> MidenVMAsmBuilder<'ir> { pub(super) fn compile_instruction(&mut self, instr_val: &Value) { if let Some(instruction) = instr_val.get_instruction(self.context) { match instruction { - Instruction::AddrOf(arg) => todo!(), Instruction::AsmBlock(asm, args) => todo!(), Instruction::BitCast(val, ty) => todo!(), Instruction::BinaryOp { op, arg1, arg2 } => { @@ -281,7 +280,7 @@ impl<'ir> MidenVMAsmBuilder<'ir> { } Instruction::Branch(to_block) => todo!(), Instruction::Call(func, args) => self.compile_call(instr_val, func, args), - Instruction::CastPtr(val, ty, offs) => { + Instruction::CastPtr(val, ty) => { todo!() } Instruction::Cmp(pred, lhs_value, rhs_value) => { @@ -299,39 +298,29 @@ impl<'ir> MidenVMAsmBuilder<'ir> { gas, .. } => todo!(), - Instruction::ExtractElement { - array, - ty, - index_val, - } => todo!(), - Instruction::ExtractValue { - aggregate, indices, .. - } => todo!(), Instruction::FuelVm(fuel_vm_instr) => todo!(), - Instruction::GetLocal(local_var) => todo!(), - Instruction::InsertElement { - array, - ty, - value, - index_val, - } => todo!(), - Instruction::InsertValue { - aggregate, - value, + Instruction::GetElemPtr { + base, + elem_ptr_ty, indices, - .. 
} => todo!(), + Instruction::GetLocal(local_var) => todo!(), Instruction::IntToPtr(val, _) => todo!(), Instruction::Load(src_val) => todo!(), - Instruction::MemCopy { - dst_val, - src_val, + Instruction::MemCopyBytes { + dst_val_ptr, + src_val_ptr, byte_len, } => todo!(), + Instruction::MemCopyVal { + dst_val_ptr, + src_val_ptr, + } => todo!(), Instruction::Nop => (), + Instruction::PtrToInt(ptr_val, int_ty) => todo!(), Instruction::Ret(ret_val, ty) => self.compile_return(ret_val, ty), Instruction::Store { - dst_val, + dst_val_ptr, stored_val, } => todo!(), } diff --git a/sway-core/src/asm_generation/programs/abstract.rs b/sway-core/src/asm_generation/programs/abstract.rs index d3ef4291da7..cb52a9ab7de 100644 --- a/sway-core/src/asm_generation/programs/abstract.rs +++ b/sway-core/src/asm_generation/programs/abstract.rs @@ -72,9 +72,12 @@ impl AbstractProgram { // Allocate the registers for each function. let functions = abstract_functions .into_iter() - .map(|fn_ops| fn_ops.allocate_registers()) - .map(AllocatedAbstractInstructionSet::emit_pusha_popa) - .collect::>(); + .map(|fn_ops| { + fn_ops + .allocate_registers() + .map(AllocatedAbstractInstructionSet::emit_pusha_popa) + }) + .collect::, _>>()?; // XXX need to verify that the stack use for each function is balanced. 
diff --git a/sway-core/src/control_flow_analysis/dead_code_analysis.rs b/sway-core/src/control_flow_analysis/dead_code_analysis.rs index e2afbf51934..0766c0d031d 100644 --- a/sway-core/src/control_flow_analysis/dead_code_analysis.rs +++ b/sway-core/src/control_flow_analysis/dead_code_analysis.rs @@ -1478,7 +1478,10 @@ fn connect_expression<'eng: 'cfg, 'cfg>( address.span.clone(), options, ), - Array { contents } => { + Array { + elem_type: _, + contents, + } => { let nodes = contents .iter() .map(|elem| { diff --git a/sway-core/src/ir_generation.rs b/sway-core/src/ir_generation.rs index 80d54fd1d6f..f6166a8dd56 100644 --- a/sway-core/src/ir_generation.rs +++ b/sway-core/src/ir_generation.rs @@ -90,6 +90,10 @@ pub fn compile_program( &test_fns, ), }?; - ctx.verify() - .map_err(|ir_error| CompileError::InternalOwned(ir_error.to_string(), Span::dummy())) + + //println!("{ctx}"); + + ctx.verify().map_err(|ir_error: sway_ir::IrError| { + CompileError::InternalOwned(ir_error.to_string(), Span::dummy()) + }) } diff --git a/sway-core/src/ir_generation/compile.rs b/sway-core/src/ir_generation/compile.rs index e6228d006dd..3c54177e886 100644 --- a/sway-core/src/ir_generation/compile.rs +++ b/sway-core/src/ir_generation/compile.rs @@ -1,10 +1,10 @@ use crate::{ - decl_engine::{DeclEngine, DeclRefFunction}, + decl_engine::DeclRefFunction, language::{ty, Visibility}, metadata::MetadataManager, semantic_analysis::namespace, - type_system::*, - types::*, + type_system::TypeId, + types::{LogId, MessageId}, Engines, }; @@ -16,7 +16,7 @@ use super::{ use sway_error::error::CompileError; use sway_ir::{metadata::combine as md_combine, *}; -use sway_types::{span::Span, Spanned}; +use sway_types::Spanned; use std::collections::HashMap; @@ -308,27 +308,18 @@ pub(super) fn compile_function( is_entry: bool, test_decl_ref: Option, ) -> Result, CompileError> { - let type_engine = engines.te(); - let decl_engine = engines.de(); // Currently monomorphization of generics is inlined into main() 
and the functions with generic // args are still present in the AST declarations, but they can be ignored. if !ast_fn_decl.type_parameters.is_empty() { Ok(None) } else { - let args = ast_fn_decl - .parameters - .iter() - .map(|param| convert_fn_param(type_engine, decl_engine, context, param)) - .collect::, CompileError>>()?; - - compile_fn_with_args( + compile_fn( engines, context, md_mgr, module, ast_fn_decl, is_entry, - args, None, logged_types_map, messages_types_map, @@ -390,35 +381,14 @@ pub(super) fn compile_tests( .collect() } -fn convert_fn_param( - type_engine: &TypeEngine, - decl_engine: &DeclEngine, - context: &mut Context, - param: &ty::TyFunctionParameter, -) -> Result<(String, Type, bool, Span), CompileError> { - convert_resolved_typeid( - type_engine, - decl_engine, - context, - ¶m.type_argument.type_id, - ¶m.type_argument.span, - ) - .map(|ty| { - let by_ref = - param.is_reference && type_engine.get(param.type_argument.type_id).is_copy_type(); - (param.name.as_str().into(), ty, by_ref, param.name.span()) - }) -} - #[allow(clippy::too_many_arguments)] -fn compile_fn_with_args( +fn compile_fn( engines: Engines<'_>, context: &mut Context, md_mgr: &mut MetadataManager, module: Module, ast_fn_decl: &ty::TyFunctionDecl, is_entry: bool, - args: Vec<(String, Type, bool, Span)>, selector: Option<[u8; 4]>, logged_types_map: &HashMap, messages_types_map: &HashMap, @@ -438,10 +408,33 @@ fn compile_fn_with_args( .. } = ast_fn_decl; - let mut args = args - .into_iter() - .map(|(name, ty, by_ref, span)| (name, ty, by_ref, md_mgr.span_to_md(context, &span))) - .collect::>(); + let args = ast_fn_decl + .parameters + .iter() + .map(|param| { + // Convert to an IR type. + convert_resolved_typeid( + type_engine, + decl_engine, + context, + ¶m.type_argument.type_id, + ¶m.type_argument.span, + ) + .map(|ty| { + ( + // Convert the name. + param.name.as_str().into(), + // Convert the type further to a pointer if it's a reference. 
+ param + .is_reference + .then(|| Type::new_ptr(context, ty)) + .unwrap_or(ty), + // Convert the span to a metadata index. + md_mgr.span_to_md(context, ¶m.name.span()), + ) + }) + }) + .collect::, CompileError>>()?; let ret_type = convert_resolved_typeid( type_engine, @@ -451,17 +444,6 @@ fn compile_fn_with_args( &return_type.span, )?; - let returns_by_ref = !is_entry && !type_engine.get(return_type.type_id).is_copy_type(); - if returns_by_ref { - // Instead of 'returning' a by-ref value we make the last argument an 'out' parameter. - args.push(( - "__ret_value".to_owned(), - ret_type, - true, - md_mgr.span_to_md(context, &return_type.span), - )); - } - let span_md_idx = md_mgr.span_to_md(context, span); let storage_md_idx = md_mgr.purity_to_md(context, *purity); let mut metadata = md_combine(context, &span_md_idx, &storage_md_idx); @@ -493,7 +475,6 @@ fn compile_fn_with_args( context, module, func, - returns_by_ref, logged_types_map, messages_types_map, ); @@ -526,10 +507,6 @@ fn compile_fn_with_args( || compiler.current_block == compiler.function.get_entry_block(context) || compiler.current_block.num_predecessors(context) > 0) { - if returns_by_ref { - // Need to copy ref-type return values to the 'out' parameter. - ret_val = compiler.compile_copy_to_last_arg(context, ret_val, None); - } if ret_type.is_unit(context) { ret_val = Constant::get_unit(context); } @@ -538,34 +515,6 @@ fn compile_fn_with_args( Ok(func) } -/* Disabled until we can improve symbol resolution. See comments above in compile_declarations(). 
- -fn compile_impl( - context: &mut Context, - module: Module, - self_type: TypeInfo, - ast_methods: Vec, -) -> Result<(), CompileError> { - for method in ast_methods { - let args = method - .parameters - .iter() - .map(|param| { - if param.name.as_str() == "self" { - convert_resolved_type(context, &self_type) - } else { - convert_resolved_typeid(context, ¶m.type_id, ¶m.type_span) - } - .map(|ty| (param.name.as_str().into(), ty, param.name.span().clone())) - }) - .collect::, CompileError>>()?; - - compile_fn_with_args(context, module, method, args, None)?; - } - Ok(()) -} -*/ - fn compile_abi_method( context: &mut Context, md_mgr: &mut MetadataManager, @@ -602,29 +551,13 @@ fn compile_abi_method( // An ABI method is always an entry point. let is_entry = true; - let args = ast_fn_decl - .parameters - .iter() - .map(|param| { - convert_resolved_typeid( - type_engine, - decl_engine, - context, - ¶m.type_argument.type_id, - ¶m.type_argument.span, - ) - .map(|ty| (param.name.as_str().into(), ty, false, param.name.span())) - }) - .collect::, CompileError>>()?; - - compile_fn_with_args( + compile_fn( engines, context, md_mgr, module, ast_fn_decl, is_entry, - args, Some(selector), logged_types_map, messages_types_map, diff --git a/sway-core/src/ir_generation/const_eval.rs b/sway-core/src/ir_generation/const_eval.rs index 1ec08bc6b29..73454f0e419 100644 --- a/sway-core/src/ir_generation/const_eval.rs +++ b/sway-core/src/ir_generation/const_eval.rs @@ -61,7 +61,7 @@ pub(crate) fn compile_const_decl( let mut stored_const_opt: Option<&Constant> = None; for ins in fn_compiler.current_block.instruction_iter(env.context) { if let Some(Instruction::Store { - dst_val, + dst_val_ptr: dst_val, stored_val, }) = ins.get_instruction(env.context) { @@ -300,7 +300,7 @@ fn const_eval_typed_expr( // We couldn't evaluate all fields to a constant. 
return Ok(None); } - get_aggregate_for_types( + get_struct_for_types( lookup.type_engine, lookup.decl_engine, lookup.context, @@ -341,7 +341,10 @@ fn const_eval_typed_expr( )) }) } - ty::TyExpressionVariant::Array { contents } => { + ty::TyExpressionVariant::Array { + elem_type, + contents, + } => { let (mut element_typs, mut element_vals): (Vec<_>, Vec<_>) = (vec![], vec![]); for value in contents { let eval_expr_opt = const_eval_typed_expr(lookup, known_consts, value)?; @@ -354,11 +357,10 @@ fn const_eval_typed_expr( // We couldn't evaluate all fields to a constant or cannot determine element type. return Ok(None); } - let mut element_iter = element_typs.iter(); - let element_type_id = *element_iter.next().unwrap(); - if !element_iter.all(|tid| { + let elem_type_info = lookup.type_engine.get(*elem_type); + if !element_typs.iter().all(|tid| { lookup.type_engine.get(*tid).eq( - &lookup.type_engine.get(element_type_id), + &elem_type_info, Engines::new(lookup.type_engine, lookup.decl_engine), ) }) { @@ -369,7 +371,7 @@ fn const_eval_typed_expr( lookup.type_engine, lookup.decl_engine, lookup.context, - element_type_id, + *elem_type, element_typs.len().try_into().unwrap(), ) .map_or(None, |array_ty| { @@ -387,7 +389,7 @@ fn const_eval_typed_expr( .. 
} => { let enum_decl = lookup.decl_engine.get_enum(enum_ref); - let aggregate = create_enum_aggregate( + let aggregate = create_tagged_union_type( lookup.type_engine, lookup.decl_engine, lookup.context, diff --git a/sway-core/src/ir_generation/convert.rs b/sway-core/src/ir_generation/convert.rs index 780bf6188de..3a94495382d 100644 --- a/sway-core/src/ir_generation/convert.rs +++ b/sway-core/src/ir_generation/convert.rs @@ -5,7 +5,7 @@ use crate::{ TypeEngine, }; -use super::types::{create_enum_aggregate, create_tuple_aggregate}; +use super::types::{create_tagged_union_type, create_tuple_aggregate}; use sway_error::error::CompileError; use sway_ir::{Constant, Context, Type, Value}; @@ -19,6 +19,8 @@ pub(super) fn convert_literal_to_value(context: &mut Context, ast_literal: &Lite // consistent and doesn't tolerate mising integers of different width, so for now, until we // do introduce explicit `as` casting, all integers are `u64` as far as the IR is // concerned. + // + // XXX The above isn't true for other targets. We need to improve this. 
Literal::U8(n) => Constant::get_uint(context, 64, *n as u64), Literal::U16(n) => Constant::get_uint(context, 64, *n as u64), Literal::U32(n) => Constant::get_uint(context, 64, *n as u64), @@ -102,7 +104,7 @@ fn convert_resolved_type( TypeInfo::Boolean => Type::get_bool(context), TypeInfo::B256 => Type::get_b256(context), TypeInfo::Str(n) => Type::new_string(context, n.val() as u64), - TypeInfo::Struct(decl_ref) => super::types::get_aggregate_for_types( + TypeInfo::Struct(decl_ref) => super::types::get_struct_for_types( type_engine, decl_engine, context, @@ -114,7 +116,7 @@ fn convert_resolved_type( .collect::>() .as_slice(), )?, - TypeInfo::Enum(decl_ref) => create_enum_aggregate( + TypeInfo::Enum(decl_ref) => create_tagged_union_type( type_engine, decl_engine, context, diff --git a/sway-core/src/ir_generation/function.rs b/sway-core/src/ir_generation/function.rs index 08d29a055e1..18d3fe598a0 100644 --- a/sway-core/src/ir_generation/function.rs +++ b/sway-core/src/ir_generation/function.rs @@ -29,6 +29,30 @@ use sway_types::{ use std::collections::HashMap; +/// Engine for compiling a function and all of the AST nodes within. +/// +/// This is mostly recursively compiling expressions, as Sway is fairly heavily expression based. +/// +/// The rule here is to use compile_expression_to_value() when a value is desired, as opposed to a +/// pointer. This is most of the time, as we try to be target agnostic and not make assumptions +/// about which values must be used by reference. +/// +/// compile_expression_to_value() will force the result to be a value, by using a temporary if +/// necessary. +/// +/// compile_expression_to_ptr() will compile the expression and force it to be a pointer, also by +/// using a temporary if necessary. This can be slightly dangerous, if the reference is supposed +/// to be to a particular value but is accidentally made to a temporary value then mutations or +/// other side-effects might not be applied in the correct context. 
+/// +/// compile_expression() will compile the expression without forcing anything. If the expression +/// has a reference type, like getting a struct or an explicit ref arg, it will return a pointer +/// value, but otherwise will return a value. +/// +/// So in general the methods in FnCompiler will return a pointer if they can and will get it be +/// forced into a value if that is desired. All the temporary values are manipulated with simple +/// loads and stores, rather than anything more complicated like mem_copys. + pub(crate) struct FnCompiler<'eng> { type_engine: &'eng TypeEngine, decl_engine: &'eng DeclEngine, @@ -38,7 +62,6 @@ pub(crate) struct FnCompiler<'eng> { block_to_break_to: Option, block_to_continue_to: Option, current_fn_param: Option, - returns_by_ref: bool, lexical_map: LexicalMap, recreated_fns: HashMap<(Span, Vec, Vec), Function>, // This is a map from the type IDs of a logged type and the ID of the corresponding log @@ -53,7 +76,6 @@ impl<'eng> FnCompiler<'eng> { context: &mut Context, module: Module, function: Function, - returns_by_ref: bool, logged_types_map: &HashMap, messages_types_map: &HashMap, ) -> Self { @@ -72,7 +94,6 @@ impl<'eng> FnCompiler<'eng> { block_to_break_to: None, block_to_continue_to: None, lexical_map, - returns_by_ref, recreated_fns: HashMap::new(), current_fn_param: None, logged_types_map: logged_types_map.clone(), @@ -97,33 +118,19 @@ impl<'eng> FnCompiler<'eng> { ast_block: &ty::TyCodeBlock, ) -> Result { self.compile_with_new_scope(|fn_compiler| { - fn_compiler.compile_code_block_inner(context, md_mgr, ast_block) - }) - } - - fn compile_code_block_inner( - &mut self, - context: &mut Context, - md_mgr: &mut MetadataManager, - ast_block: &ty::TyCodeBlock, - ) -> Result { - self.lexical_map.enter_scope(); - - let mut ast_nodes = ast_block.contents.iter(); - let value_res = loop { - let ast_node = match ast_nodes.next() { - Some(ast_node) => ast_node, - None => break Ok(Constant::get_unit(context)), - }; - match 
self.compile_ast_node(context, md_mgr, ast_node) { - Ok(Some(val)) => break Ok(val), - Ok(None) => (), - Err(err) => break Err(err), + let mut ast_nodes = ast_block.contents.iter(); + loop { + let ast_node = match ast_nodes.next() { + Some(ast_node) => ast_node, + None => break Ok(Constant::get_unit(context)), + }; + match fn_compiler.compile_ast_node(context, md_mgr, ast_node) { + Ok(Some(val)) => break Ok(val), + Ok(None) => (), + Err(err) => break Err(err), + } } - }; - - self.lexical_map.leave_scope(); - value_res + }) } fn compile_ast_node( @@ -132,6 +139,13 @@ impl<'eng> FnCompiler<'eng> { md_mgr: &mut MetadataManager, ast_node: &ty::TyAstNode, ) -> Result, CompileError> { + let unexpected_decl = |decl_type: &'static str| { + Err(CompileError::UnexpectedDeclaration { + decl_type, + span: ast_node.span.clone(), + }) + }; + let span_md_idx = md_mgr.span_to_md(context, &ast_node.span); match &ast_node.content { ty::TyAstNodeContent::Declaration(td) => match td { @@ -143,21 +157,9 @@ impl<'eng> FnCompiler<'eng> { self.compile_const_decl(context, md_mgr, tcd, span_md_idx)?; Ok(None) } - ty::TyDecl::FunctionDecl { .. } => Err(CompileError::UnexpectedDeclaration { - decl_type: "function", - span: ast_node.span.clone(), - }), - ty::TyDecl::TraitDecl { .. } => Err(CompileError::UnexpectedDeclaration { - decl_type: "trait", - span: ast_node.span.clone(), - }), - ty::TyDecl::StructDecl { .. } => Err(CompileError::UnexpectedDeclaration { - decl_type: "struct", - span: ast_node.span.clone(), - }), ty::TyDecl::EnumDecl { decl_id, .. } => { let ted = self.decl_engine.get_enum(decl_id); - create_enum_aggregate( + create_tagged_union_type( self.type_engine, self.decl_engine, context, @@ -166,6 +168,10 @@ impl<'eng> FnCompiler<'eng> { .map(|_| ())?; Ok(None) } + ty::TyDecl::TypeAliasDecl { .. } => Err(CompileError::UnexpectedDeclaration { + decl_type: "type alias", + span: ast_node.span.clone(), + }), ty::TyDecl::ImplTrait { .. 
} => { // XXX What if we ignore the trait implementation??? Potentially since // we currently inline everything and below we 'recreate' the functions @@ -174,47 +180,76 @@ impl<'eng> FnCompiler<'eng> { // compile and then call these properly. Ok(None) } - ty::TyDecl::AbiDecl { .. } => Err(CompileError::UnexpectedDeclaration { - decl_type: "abi", - span: ast_node.span.clone(), - }), - ty::TyDecl::GenericTypeForFunctionScope { .. } => { - Err(CompileError::UnexpectedDeclaration { - decl_type: "abi", - span: ast_node.span.clone(), - }) - } - ty::TyDecl::ErrorRecovery { .. } => Err(CompileError::UnexpectedDeclaration { - decl_type: "error recovery", - span: ast_node.span.clone(), - }), - ty::TyDecl::StorageDecl { .. } => Err(CompileError::UnexpectedDeclaration { - decl_type: "storage", - span: ast_node.span.clone(), - }), - ty::TyDecl::TypeAliasDecl { .. } => Err(CompileError::UnexpectedDeclaration { - decl_type: "type alias", - span: ast_node.span.clone(), - }), + ty::TyDecl::FunctionDecl { .. } => unexpected_decl("function"), + ty::TyDecl::TraitDecl { .. } => unexpected_decl("trait"), + ty::TyDecl::StructDecl { .. } => unexpected_decl("struct"), + ty::TyDecl::AbiDecl { .. } => unexpected_decl("abi"), + ty::TyDecl::GenericTypeForFunctionScope { .. } => unexpected_decl("generic type"), + ty::TyDecl::ErrorRecovery { .. } => unexpected_decl("error recovery"), + ty::TyDecl::StorageDecl { .. } => unexpected_decl("storage"), }, ty::TyAstNodeContent::Expression(te) => { // An expression with an ignored return value... I assume. 
- let value = self.compile_expression(context, md_mgr, te)?; + let value = self.compile_expression_to_value(context, md_mgr, te)?; if value.is_diverging(context) { Ok(Some(value)) } else { Ok(None) } } - ty::TyAstNodeContent::ImplicitReturnExpression(te) => { - self.compile_expression(context, md_mgr, te).map(Some) - } + ty::TyAstNodeContent::ImplicitReturnExpression(te) => self + .compile_expression_to_value(context, md_mgr, te) + .map(Some), // a side effect can be () because it just impacts the type system/namespacing. // There should be no new IR generated. ty::TyAstNodeContent::SideEffect(_) => Ok(None), } } + fn compile_expression_to_value( + &mut self, + context: &mut Context, + md_mgr: &mut MetadataManager, + ast_expr: &ty::TyExpression, + ) -> Result { + // Compile expression which *may* be a pointer. We can't return a pointer value here + // though, so add a `load` to it. + self.compile_expression(context, md_mgr, ast_expr) + .map(|val| { + if val.get_type(context).map_or(false, |ty| ty.is_ptr(context)) { + self.current_block.ins(context).load(val) + } else { + val + } + }) + } + + fn compile_expression_to_ptr( + &mut self, + context: &mut Context, + md_mgr: &mut MetadataManager, + ast_expr: &ty::TyExpression, + ) -> Result { + // Compile expression which *may* be a pointer. We can't return a value so create a + // temporary here, store the value and return its pointer. + let val = self.compile_expression(context, md_mgr, ast_expr)?; + let ty = match val.get_type(context) { + Some(ty) if !ty.is_ptr(context) => ty, + _ => return Ok(val), + }; + + // Create a temporary. 
+ let temp_name = self.lexical_map.insert_anon(); + let tmp_var = self + .function + .new_local_var(context, temp_name, ty, None) + .map_err(|ir_error| CompileError::InternalOwned(ir_error.to_string(), Span::dummy()))?; + let tmp_val = self.current_block.ins(context).get_local(tmp_var); + self.current_block.ins(context).store(tmp_val, val); + + Ok(tmp_val) + } + fn compile_expression( &mut self, context: &mut Context, @@ -264,9 +299,10 @@ impl<'eng> FnCompiler<'eng> { ty::TyExpressionVariant::VariableExpression { name, call_path, .. } => self.compile_var_expr(context, call_path, name, span_md_idx), - ty::TyExpressionVariant::Array { contents } => { - self.compile_array_expr(context, md_mgr, contents, span_md_idx) - } + ty::TyExpressionVariant::Array { + elem_type, + contents, + } => self.compile_array_expr(context, md_mgr, elem_type, contents, span_md_idx), ty::TyExpressionVariant::ArrayIndex { prefix, index } => { self.compile_array_index(context, md_mgr, prefix, index, span_md_idx) } @@ -279,7 +315,7 @@ impl<'eng> FnCompiler<'eng> { ast_expr.span.clone(), )), ty::TyExpressionVariant::MatchExp { desugared, .. 
} => { - self.compile_expression(context, md_mgr, desugared) + self.compile_expression_to_value(context, md_mgr, desugared) } ty::TyExpressionVariant::IfExp { condition, @@ -448,13 +484,12 @@ impl<'eng> FnCompiler<'eng> { span_md_idx: Option, ) -> Result { // New name for the key - let key_name = "key_for_storage".to_string(); - let alias_key_name = compiler.lexical_map.insert(key_name.as_str().to_owned()); + let key_name = compiler.lexical_map.insert("key_for_storage".to_owned()); // Local variable for the key let key_var = compiler .function - .new_local_var(context, alias_key_name, Type::get_b256(context), None) + .new_local_var(context, key_name, Type::get_b256(context), None) .map_err(|ir_error| { CompileError::InternalOwned(ir_error.to_string(), Span::dummy()) })?; @@ -490,7 +525,7 @@ impl<'eng> FnCompiler<'eng> { &exp.return_type, &exp.span, )?; - self.compile_expression(context, md_mgr, exp)?; + self.compile_expression_to_value(context, md_mgr, exp)?; Ok(Constant::get_uint( context, 64, @@ -514,7 +549,7 @@ impl<'eng> FnCompiler<'eng> { } Intrinsic::IsReferenceType => { let targ = type_arguments[0].clone(); - let val = !self.type_engine.get(targ.type_id).is_copy_type(); + let val = !self.type_engine.get_unaliased(targ.type_id).is_copy_type(); Ok(Constant::get_bool(context, val)) } Intrinsic::GetStorageKey => { @@ -528,8 +563,8 @@ impl<'eng> FnCompiler<'eng> { Intrinsic::Eq | Intrinsic::Gt | Intrinsic::Lt => { let lhs = &arguments[0]; let rhs = &arguments[1]; - let lhs_value = self.compile_expression(context, md_mgr, lhs)?; - let rhs_value = self.compile_expression(context, md_mgr, rhs)?; + let lhs_value = self.compile_expression_to_value(context, md_mgr, lhs)?; + let rhs_value = self.compile_expression_to_value(context, md_mgr, rhs)?; let pred = match kind { Intrinsic::Eq => Predicate::Equal, Intrinsic::Gt => Predicate::GreaterThan, @@ -543,7 +578,7 @@ impl<'eng> FnCompiler<'eng> { } Intrinsic::Gtf => { // The index is just a Value - let index = 
self.compile_expression(context, md_mgr, &arguments[0])?; + let index = self.compile_expression_to_value(context, md_mgr, &arguments[0])?; // The tx field ID has to be a compile-time constant because it becomes an // immediate @@ -586,34 +621,40 @@ impl<'eng> FnCompiler<'eng> { .gtf(index, tx_field_id) .add_metadatum(context, span_md_idx); - // Reinterpret the result of th `gtf` instruction (which is always `u64`) as type + // Reinterpret the result of the `gtf` instruction (which is always `u64`) as type // `T`. This requires an `int_to_ptr` instruction if `T` is a reference type. - if self.type_engine.get(target_type.type_id).is_copy_type() { + if self + .type_engine + .get_unaliased(target_type.type_id) + .is_copy_type() + { Ok(gtf_reg) } else { + let ptr_ty = Type::new_ptr(context, target_ir_type); Ok(self .current_block .ins(context) - .int_to_ptr(gtf_reg, target_ir_type) + .int_to_ptr(gtf_reg, ptr_ty) .add_metadatum(context, span_md_idx)) } } Intrinsic::AddrOf => { let exp = &arguments[0]; let value = self.compile_expression(context, md_mgr, exp)?; + let int_ty = Type::new_uint(context, 64); let span_md_idx = md_mgr.span_to_md(context, &span); Ok(self .current_block .ins(context) - .addr_of(value) + .ptr_to_int(value, int_ty) .add_metadatum(context, span_md_idx)) } Intrinsic::StateClear => { let key_exp = arguments[0].clone(); let number_of_slots_exp = arguments[1].clone(); - let key_value = self.compile_expression(context, md_mgr, &key_exp)?; + let key_value = self.compile_expression_to_value(context, md_mgr, &key_exp)?; let number_of_slots_value = - self.compile_expression(context, md_mgr, &number_of_slots_exp)?; + self.compile_expression_to_value(context, md_mgr, &number_of_slots_exp)?; let span_md_idx = md_mgr.span_to_md(context, &span); let key_var = store_key_in_local_mem(self, context, key_value, span_md_idx)?; Ok(self @@ -624,7 +665,7 @@ impl<'eng> FnCompiler<'eng> { } Intrinsic::StateLoadWord => { let exp = &arguments[0]; - let value = 
self.compile_expression(context, md_mgr, exp)?; + let value = self.compile_expression_to_value(context, md_mgr, exp)?; let span_md_idx = md_mgr.span_to_md(context, &span); let key_var = store_key_in_local_mem(self, context, value, span_md_idx)?; Ok(self @@ -638,7 +679,7 @@ impl<'eng> FnCompiler<'eng> { let val_exp = &arguments[1]; // Validate that the val_exp is of the right type. We couldn't do it // earlier during type checking as the type arguments may not have been resolved. - let val_ty = self.type_engine.to_typeinfo(val_exp.return_type, &span)?; + let val_ty = self.type_engine.get_unaliased(val_exp.return_type); if !val_ty.is_copy_type() { return Err(CompileError::IntrinsicUnsupportedArgType { name: kind.to_string(), @@ -646,8 +687,8 @@ impl<'eng> FnCompiler<'eng> { hint: Hint::new("This argument must be a copy type".to_string()), }); } - let key_value = self.compile_expression(context, md_mgr, key_exp)?; - let val_value = self.compile_expression(context, md_mgr, val_exp)?; + let key_value = self.compile_expression_to_value(context, md_mgr, key_exp)?; + let val_value = self.compile_expression_to_value(context, md_mgr, val_exp)?; let span_md_idx = md_mgr.span_to_md(context, &span); let key_var = store_key_in_local_mem(self, context, key_value, span_md_idx)?; Ok(self @@ -662,7 +703,7 @@ impl<'eng> FnCompiler<'eng> { let number_of_slots_exp = arguments[2].clone(); // Validate that the val_exp is of the right type. We couldn't do it // earlier during type checking as the type arguments may not have been resolved. 
- let val_ty = self.type_engine.to_typeinfo(val_exp.return_type, &span)?; + let val_ty = self.type_engine.get_unaliased(val_exp.return_type); if !val_ty.eq(&TypeInfo::RawUntypedPtr, engines) { return Err(CompileError::IntrinsicUnsupportedArgType { name: kind.to_string(), @@ -670,18 +711,19 @@ impl<'eng> FnCompiler<'eng> { hint: Hint::new("This argument must be raw_ptr".to_string()), }); } - let key_value = self.compile_expression(context, md_mgr, &key_exp)?; - let val_value = self.compile_expression(context, md_mgr, &val_exp)?; + let key_value = self.compile_expression_to_value(context, md_mgr, &key_exp)?; + let val_value = self.compile_expression_to_value(context, md_mgr, &val_exp)?; let number_of_slots_value = - self.compile_expression(context, md_mgr, &number_of_slots_exp)?; + self.compile_expression_to_value(context, md_mgr, &number_of_slots_exp)?; let span_md_idx = md_mgr.span_to_md(context, &span); let key_var = store_key_in_local_mem(self, context, key_value, span_md_idx)?; let b256_ty = Type::get_b256(context); + let b256_ptr_ty = Type::new_ptr(context, b256_ty); // For quad word, the IR instructions take in a pointer rather than a raw u64. let val_ptr = self .current_block .ins(context) - .int_to_ptr(val_value, b256_ty) + .int_to_ptr(val_value, b256_ptr_ty) .add_metadatum(context, span_md_idx); match kind { Intrinsic::StateLoadQuad => Ok(self @@ -699,7 +741,7 @@ impl<'eng> FnCompiler<'eng> { } Intrinsic::Log => { // The log value and the log ID are just Value. 
- let log_val = self.compile_expression(context, md_mgr, &arguments[0])?; + let log_val = self.compile_expression_to_value(context, md_mgr, &arguments[0])?; let log_id = match self.logged_types_map.get(&arguments[0].return_type) { None => { return Err(CompileError::Internal( @@ -748,15 +790,16 @@ impl<'eng> FnCompiler<'eng> { }; let lhs = &arguments[0]; let rhs = &arguments[1]; - let lhs_value = self.compile_expression(context, md_mgr, lhs)?; - let rhs_value = self.compile_expression(context, md_mgr, rhs)?; + let lhs_value = self.compile_expression_to_value(context, md_mgr, lhs)?; + let rhs_value = self.compile_expression_to_value(context, md_mgr, rhs)?; Ok(self .current_block .ins(context) .binary_op(op, lhs_value, rhs_value)) } Intrinsic::Revert => { - let revert_code_val = self.compile_expression(context, md_mgr, &arguments[0])?; + let revert_code_val = + self.compile_expression_to_value(context, md_mgr, &arguments[0])?; // The `revert` instruction let span_md_idx = md_mgr.span_to_md(context, &span); @@ -786,8 +829,8 @@ impl<'eng> FnCompiler<'eng> { let lhs = &arguments[0]; let count = &arguments[1]; - let lhs_value = self.compile_expression(context, md_mgr, lhs)?; - let count_value = self.compile_expression(context, md_mgr, count)?; + let lhs_value = self.compile_expression_to_value(context, md_mgr, lhs)?; + let count_value = self.compile_expression_to_value(context, md_mgr, count)?; let rhs_value = self.current_block.ins(context).binary_op( BinaryOpKind::Mul, len_value, @@ -803,26 +846,22 @@ impl<'eng> FnCompiler<'eng> { /* First operand: recipient + message data */ // Step 1: compile the user data and get its type - let user_message = self.compile_expression(context, md_mgr, &arguments[1])?; - let user_message_type = match user_message.get_type(context) { - Some(user_message_type) => user_message_type, - None => { - return Err(CompileError::Internal( - "Unable to determine type for message data.", - span, - )) - } - }; + let user_message = + 
self.compile_expression_to_value(context, md_mgr, &arguments[1])?; + let user_message_type = user_message.get_type(context).ok_or_else(|| { + CompileError::Internal( + "Unable to determine type for message data.", + span.clone(), + ) + })?; // Step 2: build a struct with two fields: // - The first field is a `b256` that contains the `recipient` // - The second field is a `u64` that contains the message ID // - The third field contains the actual user data - let field_types = [ - Type::get_b256(context), - Type::get_uint64(context), - user_message_type, - ]; + let b256_ty = Type::get_b256(context); + let u64_ty = Type::get_uint64(context); + let field_types = [b256_ty, u64_ty, user_message_type]; let recipient_and_message_aggregate = Type::new_struct(context, field_types.to_vec()); @@ -841,77 +880,77 @@ impl<'eng> FnCompiler<'eng> { })?; // Step 4: Convert the local variable into a value via `get_local`. - let mut recipient_and_message = self + let recipient_and_message = self .current_block .ins(context) .get_local(recipient_and_message_ptr) .add_metadatum(context, span_md_idx); // Step 5: compile the `recipient` and insert it as the first field of the struct - let recipient = self.compile_expression(context, md_mgr, &arguments[0])?; - recipient_and_message = self - .current_block + let recipient = self.compile_expression_to_value(context, md_mgr, &arguments[0])?; + let gep_val = self.current_block.ins(context).get_elem_ptr_with_idx( + recipient_and_message, + b256_ty, + 0, + ); + self.current_block .ins(context) - .insert_value( - recipient_and_message, - recipient_and_message_aggregate, - recipient, - vec![0], - ) + .store(gep_val, recipient) .add_metadatum(context, span_md_idx); // Step 6: Grab the message ID from `messages_types_map` and insert it as the // second field of the struct - let message_id = match self.messages_types_map.get(&arguments[1].return_type) { - None => { - return Err(CompileError::Internal( + let message_id_val = self + .messages_types_map 
+ .get(&arguments[1].return_type) + .map(|&msg_id| Constant::get_uint(context, 64, *msg_id as u64)) + .ok_or_else(|| { + CompileError::Internal( "Unable to determine ID for smo instance.", - span, - )) - } - Some(message_id) => { - convert_literal_to_value(context, &Literal::U64(**message_id as u64)) - } - }; - recipient_and_message = self - .current_block + span.clone(), + ) + })?; + let gep_val = self.current_block.ins(context).get_elem_ptr_with_idx( + recipient_and_message, + u64_ty, + 1, + ); + self.current_block .ins(context) - .insert_value( - recipient_and_message, - recipient_and_message_aggregate, - message_id, - vec![1], - ) + .store(gep_val, message_id_val) .add_metadatum(context, span_md_idx); // Step 7: Insert the user message data as the third field of the struct - recipient_and_message = self - .current_block + let gep_val = self.current_block.ins(context).get_elem_ptr_with_idx( + recipient_and_message, + user_message_type, + 2, + ); + let user_message_size = 8 + ir_type_size_in_bytes(context, &user_message_type); + self.current_block .ins(context) - .insert_value( - recipient_and_message, - recipient_and_message_aggregate, - user_message, - vec![2], - ) + .store(gep_val, user_message) .add_metadatum(context, span_md_idx); /* Second operand: the size of the message data */ - let message_size = convert_literal_to_value( - context, - &Literal::U64(8 + ir_type_size_in_bytes(context, &user_message_type)), - ); + let user_message_size_val = Constant::get_uint(context, 64, user_message_size); /* Third operand: the output index */ - let output_index = self.compile_expression(context, md_mgr, &arguments[2])?; + let output_index = + self.compile_expression_to_value(context, md_mgr, &arguments[2])?; /* Fourth operand: the amount of coins to send */ - let coins = self.compile_expression(context, md_mgr, &arguments[3])?; + let coins = self.compile_expression_to_value(context, md_mgr, &arguments[3])?; Ok(self .current_block .ins(context) - 
.smo(recipient_and_message, message_size, output_index, coins) + .smo( + recipient_and_message, + user_message_size_val, + output_index, + coins, + ) .add_metadatum(context, span_md_idx)) } } @@ -928,48 +967,26 @@ impl<'eng> FnCompiler<'eng> { return Ok(Constant::get_unit(context)); } - let ret_value = self.compile_expression(context, md_mgr, ast_expr)?; - + let ret_value = self.compile_expression_to_value(context, md_mgr, ast_expr)?; if ret_value.is_diverging(context) { return Ok(ret_value); } let span_md_idx = md_mgr.span_to_md(context, &ast_expr.span); - - if self.returns_by_ref { - // We need to copy the actual return value to the out parameter. - self.compile_copy_to_last_arg(context, ret_value, span_md_idx); - } - - match ret_value.get_type(context) { - None => Err(CompileError::Internal( - "Unable to determine type for return statement expression.", - ast_expr.span.clone(), - )), - Some(ret_ty) => Ok(self - .current_block - .ins(context) - .ret(ret_value, ret_ty) - .add_metadatum(context, span_md_idx)), - } - } - - pub(super) fn compile_copy_to_last_arg( - &mut self, - context: &mut Context, - ret_val: Value, - span_md_idx: Option, - ) -> Value { - let dst_val = self.function.args_iter(context).last().unwrap().1; - let src_val = ret_val; - let byte_len = ir_type_size_in_bytes(context, &src_val.get_type(context).unwrap()); - - self.current_block - .ins(context) - .mem_copy(dst_val, src_val, byte_len) - .add_metadatum(context, span_md_idx); - - dst_val + ret_value + .get_type(context) + .map(|ret_ty| { + self.current_block + .ins(context) + .ret(ret_value, ret_ty) + .add_metadatum(context, span_md_idx) + }) + .ok_or_else(|| { + CompileError::Internal( + "Unable to determine type for return statement expression.", + ast_expr.span.clone(), + ) + }) } fn compile_lazy_op( @@ -983,13 +1000,13 @@ impl<'eng> FnCompiler<'eng> { ) -> Result { // Short-circuit: if LHS is true for AND we still must eval the RHS block; for OR we can // skip the RHS block, and vice-versa. 
- let lhs_val = self.compile_expression(context, md_mgr, ast_lhs)?; + let lhs_val = self.compile_expression_to_value(context, md_mgr, ast_lhs)?; let cond_block_end = self.current_block; let rhs_block = self.function.create_block(context, None); let final_block = self.function.create_block(context, None); self.current_block = rhs_block; - let rhs_val = self.compile_expression(context, md_mgr, ast_rhs)?; + let rhs_val = self.compile_expression_to_value(context, md_mgr, ast_rhs)?; let merge_val_arg_idx = final_block.new_arg( context, @@ -998,7 +1015,6 @@ impl<'eng> FnCompiler<'eng> { .get_type(context) .unwrap_or_else(|| Type::get_unit(context)) }), - false, ); if !cond_block_end.is_terminated(context) { @@ -1042,24 +1058,28 @@ impl<'eng> FnCompiler<'eng> { contract_call_parameters: &HashMap, ast_name: &str, ast_args: &[(Ident, ty::TyExpression)], - return_type: TypeId, + ast_return_type: TypeId, span_md_idx: Option, ) -> Result { + // XXX This is very FuelVM specific and needs to be broken out of here and called + // conditionally based on the target. + // Compile each user argument let compiled_args = ast_args .iter() - .map(|(_, expr)| self.compile_expression(context, md_mgr, expr)) + .map(|(_, expr)| self.compile_expression_to_value(context, md_mgr, expr)) .collect::, CompileError>>()?; + let u64_ty = Type::get_uint64(context); + let user_args_val = match compiled_args.len() { 0 => Constant::get_uint(context, 64, 0), 1 => { // The single arg doesn't need to be put into a struct. let arg0 = compiled_args[0]; - let u64_ty = Type::get_uint64(context); if self .type_engine - .get(ast_args[0].1.return_type) + .get_unaliased(ast_args[0].1.return_type) .is_copy_type() { self.current_block @@ -1067,27 +1087,23 @@ impl<'eng> FnCompiler<'eng> { .bitcast(arg0, u64_ty) .add_metadatum(context, span_md_idx) } else { - // Copy this value to a new location. This is quite inefficient but we need to - // pass by reference rather than by value. 
Optimisation passes can remove all - // the unnecessary copying eventually, though it feels like we're jumping - // through a bunch of hoops here (employing the single arg optimisation) for - // minimal returns. - let by_reference_arg_name = self + // Use a temporary to pass a reference to the arg. + let arg0_type = arg0.get_type(context).unwrap(); + let temp_arg_name = self .lexical_map .insert(format!("{}{}", "arg_for_", ast_name)); - let arg0_type = arg0.get_type(context).unwrap(); - let by_reference_arg = self + let temp_var = self .function - .new_local_var(context, by_reference_arg_name, arg0_type, None) + .new_local_var(context, temp_arg_name, arg0_type, None) .map_err(|ir_error| { CompileError::InternalOwned(ir_error.to_string(), Span::dummy()) })?; - let arg0_var = self.current_block.ins(context).get_local(by_reference_arg); - self.current_block.ins(context).store(arg0_var, arg0); + let temp_val = self.current_block.ins(context).get_local(temp_var); + self.current_block.ins(context).store(temp_val, arg0); - // NOTE: Here we're casting the original local variable to u64. - self.current_block.ins(context).addr_of(arg0_var) + // NOTE: Here we're casting the temp pointer to an integer. 
+ self.current_block.ins(context).ptr_to_int(temp_val, u64_ty) } } _ => { @@ -1096,7 +1112,7 @@ impl<'eng> FnCompiler<'eng> { .iter() .filter_map(|val| val.get_type(context)) .collect::>(); - let user_args_struct_aggregate = Type::new_struct(context, field_types); + let user_args_struct_type = Type::new_struct(context, field_types.clone()); // New local pointer for the struct to hold all user arguments let user_args_struct_local_name = self @@ -1107,7 +1123,7 @@ impl<'eng> FnCompiler<'eng> { .new_local_var( context, user_args_struct_local_name, - user_args_struct_aggregate, + user_args_struct_type, None, ) .map_err(|ir_error| { @@ -1120,62 +1136,66 @@ impl<'eng> FnCompiler<'eng> { .ins(context) .get_local(user_args_struct_var) .add_metadatum(context, span_md_idx); - compiled_args.into_iter().enumerate().fold( - user_args_struct_val, - |user_args_struct_ptr_val, (insert_idx, insert_val)| { - self.current_block + compiled_args + .into_iter() + .zip(field_types.into_iter()) + .enumerate() + .for_each(|(insert_idx, (field_val, field_type))| { + let gep_val = self + .current_block .ins(context) - .insert_value( - user_args_struct_ptr_val, - user_args_struct_aggregate, - insert_val, - vec![insert_idx as u64], + .get_elem_ptr_with_idx( + user_args_struct_val, + field_type, + insert_idx as u64, ) - .add_metadatum(context, span_md_idx) - }, - ); + .add_metadatum(context, span_md_idx); + + self.current_block + .ins(context) + .store(gep_val, field_val) + .add_metadatum(context, span_md_idx); + }); - // NOTE: Here casting the local var struct to a u64. + // NOTE: Here we're casting the args struct pointer to an integer. self.current_block .ins(context) - .addr_of(user_args_struct_val) + .ptr_to_int(user_args_struct_val, u64_ty) .add_metadatum(context, span_md_idx) } }; // Now handle the contract address and the selector. The contract address is just // as B256 while the selector is a [u8; 4] which we have to convert to a U64. 
- let ra_struct_aggregate = Type::new_struct( - context, - [ - Type::get_b256(context), - Type::get_uint64(context), - Type::get_uint64(context), - ] - .to_vec(), - ); + let b256_ty = Type::get_b256(context); + let ra_struct_type = Type::new_struct(context, [b256_ty, u64_ty, u64_ty].to_vec()); let ra_struct_var = self .function .new_local_var( context, self.lexical_map.insert_anon(), - ra_struct_aggregate, + ra_struct_type, None, ) .map_err(|ir_error| CompileError::InternalOwned(ir_error.to_string(), Span::dummy()))?; - let mut ra_struct_val = self + + let ra_struct_ptr_val = self .current_block .ins(context) .get_local(ra_struct_var) .add_metadatum(context, span_md_idx); // Insert the contract address - let addr = self.compile_expression(context, md_mgr, &call_params.contract_address)?; - ra_struct_val = self - .current_block + let addr = + self.compile_expression_to_value(context, md_mgr, &call_params.contract_address)?; + let gep_val = + self.current_block + .ins(context) + .get_elem_ptr_with_idx(ra_struct_ptr_val, b256_ty, 0); + self.current_block .ins(context) - .insert_value(ra_struct_val, ra_struct_aggregate, addr, vec![0]) + .store(gep_val, addr) .add_metadatum(context, span_md_idx); // Convert selector to U64 and then insert it @@ -1187,24 +1207,30 @@ impl<'eng> FnCompiler<'eng> { ), ) .add_metadatum(context, span_md_idx); - ra_struct_val = self - .current_block + let gep_val = + self.current_block + .ins(context) + .get_elem_ptr_with_idx(ra_struct_ptr_val, u64_ty, 1); + self.current_block .ins(context) - .insert_value(ra_struct_val, ra_struct_aggregate, sel_val, vec![1]) + .store(gep_val, sel_val) .add_metadatum(context, span_md_idx); // Insert the user args value. 
- ra_struct_val = self - .current_block + let gep_val = + self.current_block + .ins(context) + .get_elem_ptr_with_idx(ra_struct_ptr_val, u64_ty, 2); + self.current_block .ins(context) - .insert_value(ra_struct_val, ra_struct_aggregate, user_args_val, vec![2]) + .store(gep_val, user_args_val) .add_metadatum(context, span_md_idx); // Compile all other call parameters let coins = match contract_call_parameters .get(&constants::CONTRACT_CALL_COINS_PARAMETER_NAME.to_string()) { - Some(coins_expr) => self.compile_expression(context, md_mgr, coins_expr)?, + Some(coins_expr) => self.compile_expression_to_value(context, md_mgr, coins_expr)?, None => convert_literal_to_value( context, &Literal::U64(constants::CONTRACT_CALL_COINS_PARAMETER_DEFAULT_VALUE), @@ -1212,21 +1238,38 @@ impl<'eng> FnCompiler<'eng> { .add_metadatum(context, span_md_idx), }; + // As this is Fuel VM specific we can compile the asset ID directly to a `ptr b256` + // pointer. let asset_id = match contract_call_parameters .get(&constants::CONTRACT_CALL_ASSET_ID_PARAMETER_NAME.to_string()) { - Some(asset_id_expr) => self.compile_expression(context, md_mgr, asset_id_expr)?, - None => convert_literal_to_value( - context, - &Literal::B256(constants::CONTRACT_CALL_ASSET_ID_PARAMETER_DEFAULT_VALUE), - ) - .add_metadatum(context, span_md_idx), + Some(asset_id_expr) => { + self.compile_expression_to_ptr(context, md_mgr, asset_id_expr)? 
+ } + None => { + let asset_id_val = convert_literal_to_value( + context, + &Literal::B256(constants::CONTRACT_CALL_ASSET_ID_PARAMETER_DEFAULT_VALUE), + ) + .add_metadatum(context, span_md_idx); + + let tmp_asset_id_name = self.lexical_map.insert_anon(); + let tmp_var = self + .function + .new_local_var(context, tmp_asset_id_name, b256_ty, None) + .map_err(|ir_error| { + CompileError::InternalOwned(ir_error.to_string(), Span::dummy()) + })?; + let tmp_val = self.current_block.ins(context).get_local(tmp_var); + self.current_block.ins(context).store(tmp_val, asset_id_val); + tmp_val + } }; let gas = match contract_call_parameters .get(&constants::CONTRACT_CALL_GAS_PARAMETER_NAME.to_string()) { - Some(gas_expr) => self.compile_expression(context, md_mgr, gas_expr)?, + Some(gas_expr) => self.compile_expression_to_value(context, md_mgr, gas_expr)?, None => self .current_block .ins(context) @@ -1234,26 +1277,43 @@ impl<'eng> FnCompiler<'eng> { .add_metadatum(context, span_md_idx), }; + // Convert the return type. If it's a reference type then make it a pointer. let return_type = convert_resolved_typeid_no_span( self.type_engine, self.decl_engine, context, - &return_type, + &ast_return_type, )?; + let ret_is_copy_type = self + .type_engine + .get_unaliased(ast_return_type) + .is_copy_type(); + let return_type = if ret_is_copy_type { + return_type + } else { + Type::new_ptr(context, return_type) + }; // Insert the contract_call instruction - Ok(self + let call_val = self .current_block .ins(context) .contract_call( return_type, ast_name.to_string(), - ra_struct_val, + ra_struct_ptr_val, coins, asset_id, gas, ) - .add_metadatum(context, span_md_idx)) + .add_metadatum(context, span_md_idx); + + // If it's a pointer then also load it. + Ok(if ret_is_copy_type { + call_val + } else { + self.current_block.ins(context).load(call_val) + }) } #[allow(clippy::too_many_arguments)] @@ -1324,39 +1384,19 @@ impl<'eng> FnCompiler<'eng> { }; // Now actually call the new function. 
- let mut args = { - let mut args = Vec::with_capacity(ast_args.len()); - for ((_, expr), param) in ast_args.iter().zip(callee.parameters.iter()) { - self.current_fn_param = Some(param.clone()); - let arg = self.compile_expression(context, md_mgr, expr)?; - if arg.is_diverging(context) { - return Ok(arg); - } - self.current_fn_param = None; - args.push(arg); - } - args - }; - - // If there is an 'unexpected' extra arg in the callee and it's a in/out then we need to - // set up returning by reference. - if args.len() + 1 == new_callee.num_args(context) { - if let Some((arg_ty, _by_ref)) = new_callee - .args_iter(context) - .last() - .unwrap() - .1 - .get_argument_type_and_byref(context) - { - // Create a local to pass in as the 'out' parameter. - let local_name = format!("__ret_val_{}", new_callee.get_name(context)); - let local_ptr = self - .function - .new_unique_local_var(context, local_name, arg_ty, None); - - // Pass it as the final arg. - args.push(self.current_block.ins(context).get_local(local_ptr)); + let mut args = Vec::with_capacity(ast_args.len()); + for ((_, expr), param) in ast_args.iter().zip(callee.parameters.iter()) { + self.current_fn_param = Some(param.clone()); + let arg = if param.is_reference && param.is_mutable { + self.compile_expression_to_ptr(context, md_mgr, expr) + } else { + self.compile_expression_to_value(context, md_mgr, expr) + }?; + if arg.is_diverging(context) { + return Ok(arg); } + self.current_fn_param = None; + args.push(arg); } let state_idx_md_idx = self_state_idx.and_then(|self_state_idx| { @@ -1383,7 +1423,7 @@ impl<'eng> FnCompiler<'eng> { // Compile the condition expression in the entry block. Then save the current block so we // can jump to the true and false blocks after we've created them. 
let cond_span_md_idx = md_mgr.span_to_md(context, &ast_condition.span); - let cond_value = self.compile_expression(context, md_mgr, ast_condition)?; + let cond_value = self.compile_expression_to_value(context, md_mgr, ast_condition)?; if cond_value.is_diverging(context) { return Ok(cond_value); } @@ -1404,14 +1444,14 @@ impl<'eng> FnCompiler<'eng> { let true_block_begin = self.function.create_block(context, None); self.current_block = true_block_begin; - let true_value = self.compile_expression(context, md_mgr, ast_then)?; + let true_value = self.compile_expression_to_value(context, md_mgr, ast_then)?; let true_block_end = self.current_block; let false_block_begin = self.function.create_block(context, None); self.current_block = false_block_begin; let false_value = match ast_else { None => Constant::get_unit(context), - Some(expr) => self.compile_expression(context, md_mgr, expr)?, + Some(expr) => self.compile_expression_to_value(context, md_mgr, expr)?, }; let false_block_end = self.current_block; @@ -1436,7 +1476,7 @@ impl<'eng> FnCompiler<'eng> { let merge_block = self.function.create_block(context, None); // Add a single argument to merge_block that merges true_value and false_value. // Rely on the type of the ast node when creating that argument - let merge_val_arg_idx = merge_block.new_arg(context, return_type, false); + let merge_val_arg_idx = merge_block.new_arg(context, return_type); if !true_block_end.is_terminated(context) { true_block_end .ins(context) @@ -1459,8 +1499,8 @@ impl<'eng> FnCompiler<'eng> { exp: &ty::TyExpression, variant: &ty::TyEnumVariant, ) -> Result { - // retrieve the aggregate info for the enum - let enum_aggregate = match convert_resolved_typeid( + // Retrieve the type info for the enum. 
+ let enum_type = match convert_resolved_typeid( self.type_engine, self.decl_engine, context, @@ -1475,13 +1515,25 @@ impl<'eng> FnCompiler<'eng> { )); } }; - // compile the expression to asm - let compiled_value = self.compile_expression(context, md_mgr, exp)?; - // retrieve the value minus the tag - Ok(self.current_block.ins(context).extract_value( + + // Compile the struct expression. + let compiled_value = self.compile_expression_to_ptr(context, md_mgr, exp)?; + + // Get the variant type. + let variant_type = enum_type + .get_indexed_type(context, &[1, variant.tag as u64]) + .ok_or_else(|| { + CompileError::Internal( + "Failed to get variant type from enum in `unsigned downcast`.", + exp.span.clone(), + ) + })?; + + // Get the offset to the variant. + Ok(self.current_block.ins(context).get_elem_ptr_with_idcs( compiled_value, - enum_aggregate, - vec![1, variant.tag as u64], + variant_type, + &[1, variant.tag as u64], )) } @@ -1492,23 +1544,13 @@ impl<'eng> FnCompiler<'eng> { exp: Box, ) -> Result { let tag_span_md_idx = md_mgr.span_to_md(context, &exp.span); - let enum_aggregate = match convert_resolved_typeid( - self.type_engine, - self.decl_engine, - context, - &exp.return_type, - &exp.span, - )? { - ty if ty.is_struct(context) => ty, - _ => { - return Err(CompileError::Internal("Expected enum type here.", exp.span)); - } - }; - let exp = self.compile_expression(context, md_mgr, &exp)?; + let struct_val = self.compile_expression_to_ptr(context, md_mgr, &exp)?; + + let u64_ty = Type::get_uint64(context); Ok(self .current_block .ins(context) - .extract_value(exp, enum_aggregate, vec![0]) + .get_elem_ptr_with_idx(struct_val, u64_ty, 0) .add_metadatum(context, tag_span_md_idx)) } @@ -1585,7 +1627,7 @@ impl<'eng> FnCompiler<'eng> { // Add the conditional in the cond block which jumps into the body or out to the final // block. 
self.current_block = cond_block; - let cond_value = self.compile_expression(context, md_mgr, condition)?; + let cond_value = self.compile_expression_to_value(context, md_mgr, condition)?; if !self.current_block.is_terminated(context) { self.current_block.ins(context).conditional_branch( cond_value, @@ -1600,13 +1642,13 @@ impl<'eng> FnCompiler<'eng> { Ok(Constant::get_unit(context).add_metadatum(context, span_md_idx)) } - pub fn get_function_var(&self, context: &mut Context, name: &str) -> Option { + pub(crate) fn get_function_var(&self, context: &mut Context, name: &str) -> Option { self.lexical_map .get(name) .and_then(|local_name| self.function.get_local_var(context, local_name)) } - pub fn get_function_arg(&self, context: &mut Context, name: &str) -> Option { + pub(crate) fn get_function_arg(&self, context: &mut Context, name: &str) -> Option { self.function.get_arg(context, name) } @@ -1617,53 +1659,20 @@ impl<'eng> FnCompiler<'eng> { name: &Ident, span_md_idx: Option, ) -> Result { - let need_to_load = |ty: &Type, context: &Context| { - ty.is_unit(context) || ty.is_bool(context) || ty.is_uint(context) - }; - - let call_path = match call_path { - Some(call_path) => call_path.clone(), - None => CallPath::from(name.clone()), - }; + let call_path = call_path + .clone() + .unwrap_or_else(|| CallPath::from(name.clone())); // We need to check the symbol map first, in case locals are shadowing the args, other // locals or even constants. 
if let Some(var) = self.get_function_var(context, name.as_str()) { - let local_val = self + Ok(self .current_block .ins(context) .get_local(var) - .add_metadatum(context, span_md_idx); - let fn_param = self.current_fn_param.as_ref(); - let is_ref_primitive = fn_param.is_some() - && self - .type_engine - .get(fn_param.unwrap().type_argument.type_id) - .is_copy_type() - && fn_param.unwrap().is_reference - && fn_param.unwrap().is_mutable; - if !is_ref_primitive && need_to_load(&var.get_type(context), context) { - Ok(self - .current_block - .ins(context) - .load(local_val) - .add_metadatum(context, span_md_idx)) - } else { - Ok(local_val) - } + .add_metadatum(context, span_md_idx)) } else if let Some(val) = self.function.get_arg(context, name.as_str()) { - if val - .get_argument_type_and_byref(context) - .map_or(false, |(_ty, by_ref)| by_ref) - { - Ok(self - .current_block - .ins(context) - .load(val) - .add_metadatum(context, span_md_idx)) - } else { - Ok(val) - } + Ok(val) } else if let Some(const_val) = self .module .get_global_constant(context, &call_path.as_vec_string()) @@ -1693,18 +1702,13 @@ impl<'eng> FnCompiler<'eng> { // Nothing to do for an abi cast declarations. The address specified in them is already // provided in each contract call node in the AST. if matches!( - &self - .type_engine - .to_typeinfo(body.return_type, &body.span) - .map_err(|ty_err| { - CompileError::InternalOwned(format!("{ty_err:?}"), body.span.clone()) - })?, + &self.type_engine.get_unaliased(body.return_type), TypeInfo::ContractCaller { .. } ) { return Ok(None); } - // Grab these before we move body into compilation. + // Grab this before we move body into compilation. let return_type = convert_resolved_typeid( self.type_engine, self.decl_engine, @@ -1716,7 +1720,7 @@ impl<'eng> FnCompiler<'eng> { // We must compile the RHS before checking for shadowing, as it will still be in the // previous scope. 
let body_deterministically_aborts = body.deterministically_aborts(self.decl_engine, false); - let init_val = self.compile_expression(context, md_mgr, body)?; + let init_val = self.compile_expression_to_value(context, md_mgr, body)?; if init_val.is_diverging(context) || body_deterministically_aborts { return Ok(Some(init_val)); } @@ -1730,14 +1734,14 @@ impl<'eng> FnCompiler<'eng> { // otherwise use a store. let var_ty = local_var.get_type(context); if ir_type_size_in_bytes(context, &var_ty) > 0 { - let local_val = self + let local_ptr = self .current_block .ins(context) .get_local(local_var) .add_metadatum(context, span_md_idx); self.current_block .ins(context) - .store(local_val, init_val) + .store(local_ptr, init_val) .add_metadatum(context, span_md_idx); } Ok(None) @@ -1825,115 +1829,115 @@ impl<'eng> FnCompiler<'eng> { .expect("All local symbols must be in the lexical symbol map."); // First look for a local variable with the required name - let mut val = match self.function.get_local_var(context, name) { - Some(var) => self - .current_block - .ins(context) - .get_local(var) - .add_metadatum(context, span_md_idx), - None => { + let lhs_val = self + .function + .get_local_var(context, name) + .map(|var| { + self.current_block + .ins(context) + .get_local(var) + .add_metadatum(context, span_md_idx) + }) + .or_else(|| // Now look for an argument with the required name self.function .args_iter(context) - .find(|arg| &arg.0 == name) - .ok_or_else(|| { - CompileError::InternalOwned( - format!("variable not found: {name}"), - ast_reassignment.lhs_base_name.span(), - ) - })? 
- .1 - } - }; + .find_map(|(arg_name, arg_val)| (arg_name == name).then_some(*arg_val))) + .ok_or_else(|| { + CompileError::InternalOwned( + format!("variable not found: {name}"), + ast_reassignment.lhs_base_name.span(), + ) + })?; - let reassign_val = self.compile_expression(context, md_mgr, &ast_reassignment.rhs)?; + let reassign_val = + self.compile_expression_to_value(context, md_mgr, &ast_reassignment.rhs)?; if reassign_val.is_diverging(context) { return Ok(reassign_val); } - if ast_reassignment.lhs_indices.is_empty() { - // A non-aggregate; use a `store`. - self.current_block - .ins(context) - .store(val, reassign_val) - .add_metadatum(context, span_md_idx); - } else if ast_reassignment - .lhs_indices - .iter() - .any(|f| matches!(f, ProjectionKind::ArrayIndex { .. })) - { - let it = &mut ast_reassignment.lhs_indices.iter().peekable(); - while let Some(ProjectionKind::ArrayIndex { index, .. }) = it.next() { - let index_val = self.compile_expression(context, md_mgr, index)?; - if index_val.is_diverging(context) { - return Ok(index_val); - } - - let ty = match val.get_type(context).unwrap() { - ty if ty.is_array(context) => ty, - _otherwise => { - let spans = ast_reassignment - .lhs_indices - .iter() - .fold(ast_reassignment.lhs_base_name.span(), |acc, lhs| { - Span::join(acc, lhs.span()) - }); - return Err(CompileError::Internal( - "Array index reassignment to non-array.", - spans, - )); - } - }; - - // When handling nested array indexing, we should keep extracting the first - // elements up until the last, and insert into the last element. 
- let is_last_index = it.peek().is_none(); - if is_last_index { - val = self - .current_block - .ins(context) - .insert_element(val, ty, reassign_val, index_val) - .add_metadatum(context, span_md_idx); - } else { - val = self - .current_block - .ins(context) - .extract_element(val, ty, index_val) - .add_metadatum(context, span_md_idx); - } - } + let lhs_ptr = if ast_reassignment.lhs_indices.is_empty() { + // A non-aggregate; use a direct `store`. + lhs_val } else { - // An aggregate. Iterate over the field names from the left hand side and collect - // field indices. The struct type from the previous iteration is used to determine the - // field type for the current iteration. - let field_idcs = get_indices_for_struct_access( - self.type_engine, - self.decl_engine, - ast_reassignment.lhs_type, - &ast_reassignment.lhs_indices, - )?; - - let ty = match val.get_type(context).unwrap() { - ty if ty.is_struct(context) => ty, - _otherwise => { - let spans = ast_reassignment - .lhs_indices - .iter() - .fold(ast_reassignment.lhs_base_name.span(), |acc, lhs| { - Span::join(acc, lhs.span()) - }); - return Err(CompileError::Internal( - "Reassignment with multiple accessors to non-aggregate.", - spans, - )); - } - }; + // Create a GEP by following the chain of LHS indices. We use a scan which is + // essentially a map with context, which is the parent type id for the current field. + let gep_indices = ast_reassignment + .lhs_indices + .iter() + .scan(ast_reassignment.lhs_type, |cur_type_id, idx_kind| { + let cur_type_info = self.type_engine.get_unaliased(*cur_type_id); + Some(match (idx_kind, cur_type_info) { + ( + ProjectionKind::StructField { name: idx_name }, + TypeInfo::Struct(decl_ref), + ) => { + // Get the struct type info, with field names. + let ty::TyStructDecl { + call_path: struct_call_path, + fields: struct_fields, + .. + } = self.decl_engine.get_struct(&decl_ref); + + // Search for the index to the field name we're after, and its type + // id. 
+ struct_fields + .iter() + .enumerate() + .find(|(_, field)| field.name == *idx_name) + .map(|(idx, field)| (idx as u64, field.type_argument.type_id)) + .ok_or_else(|| { + CompileError::InternalOwned( + format!( + "Unknown field name '{idx_name}' for struct '{}' \ + in reassignment.", + struct_call_path.suffix.as_str(), + ), + ast_reassignment.lhs_base_name.span(), + ) + }) + .map(|(field_idx, field_type_id)| { + *cur_type_id = field_type_id; + Constant::get_uint(context, 64, field_idx) + }) + } + (ProjectionKind::TupleField { index, .. }, TypeInfo::Tuple(field_tys)) => { + *cur_type_id = field_tys[*index].type_id; + Ok(Constant::get_uint(context, 64, *index as u64)) + } + (ProjectionKind::ArrayIndex { index, .. }, TypeInfo::Array(elem_ty, _)) => { + *cur_type_id = elem_ty.type_id; + self.compile_expression_to_value(context, md_mgr, index) + } + _ => Err(CompileError::Internal( + "Unknown field in reassignment.", + idx_kind.span(), + )), + }) + }) + .collect::, _>>()?; + + // Using the type of the RHS for the GEP, rather than the final inner type of the + // aggregate, but getting the later is a bit of a pain, though the `scan` above knew it. + // Realistically the program is type checked and they should be the same. + let field_type = reassign_val.get_type(context).ok_or_else(|| { + CompileError::Internal( + "Failed to determine type of reassignment.", + ast_reassignment.lhs_base_name.span(), + ) + })?; + // Create the GEP. 
self.current_block .ins(context) - .insert_value(val, ty, reassign_val, field_idcs) - .add_metadatum(context, span_md_idx); - } + .get_elem_ptr(lhs_val, field_type, gep_indices) + .add_metadatum(context, span_md_idx) + }; + + self.current_block + .ins(context) + .store(lhs_ptr, reassign_val) + .add_metadatum(context, span_md_idx); Ok(Constant::get_unit(context).add_metadatum(context, span_md_idx)) } @@ -1948,7 +1952,7 @@ impl<'eng> FnCompiler<'eng> { span_md_idx: Option, ) -> Result { // Compile the RHS into a value - let rhs = self.compile_expression(context, md_mgr, rhs)?; + let rhs = self.compile_expression_to_value(context, md_mgr, rhs)?; if rhs.is_diverging(context) { return Ok(rhs); } @@ -1973,15 +1977,7 @@ impl<'eng> FnCompiler<'eng> { // Do the actual work. This is a recursive function because we want to drill down // to store each primitive type in the storage field in its own storage slot. - self.compile_storage_write( - context, - md_mgr, - ix, - &field_idcs, - &access_type, - rhs, - span_md_idx, - )?; + self.compile_storage_write(context, ix, &field_idcs, &access_type, rhs, span_md_idx)?; Ok(Constant::get_unit(context).add_metadatum(context, span_md_idx)) } @@ -1989,47 +1985,44 @@ impl<'eng> FnCompiler<'eng> { &mut self, context: &mut Context, md_mgr: &mut MetadataManager, + elem_type: &TypeId, contents: &[ty::TyExpression], span_md_idx: Option, ) -> Result { - let elem_type = if contents.is_empty() { - // A zero length array is a pointer to nothing, which is still supported by Sway. - // We're unable to get the type though it's irrelevant because it can't be indexed, so - // we'll just use Unit. - Type::get_unit(context) - } else { - convert_resolved_typeid_no_span( - self.type_engine, - self.decl_engine, - context, - &contents[0].return_type, - )? 
- }; - let aggregate = Type::new_array(context, elem_type, contents.len() as u64); + let elem_type = convert_resolved_typeid_no_span( + self.type_engine, + self.decl_engine, + context, + elem_type, + )?; + + let array_type = Type::new_array(context, elem_type, contents.len() as u64); - // Compile each element and insert it immediately. let temp_name = self.lexical_map.insert_anon(); let array_var = self .function - .new_local_var(context, temp_name, aggregate, None) + .new_local_var(context, temp_name, array_type, None) .map_err(|ir_error| CompileError::InternalOwned(ir_error.to_string(), Span::dummy()))?; - let mut array_value = self + let array_value = self .current_block .ins(context) .get_local(array_var) .add_metadatum(context, span_md_idx); + // Compile each element and insert it immediately. for (idx, elem_expr) in contents.iter().enumerate() { - let elem_value = self.compile_expression(context, md_mgr, elem_expr)?; + let elem_value = self.compile_expression_to_value(context, md_mgr, elem_expr)?; if elem_value.is_diverging(context) { return Ok(elem_value); } - let index_val = - Constant::get_uint(context, 64, idx as u64).add_metadatum(context, span_md_idx); - array_value = self - .current_block + let gep_val = self.current_block.ins(context).get_elem_ptr_with_idx( + array_value, + elem_type, + idx as u64, + ); + self.current_block .ins(context) - .insert_element(array_value, aggregate, elem_value, index_val) + .store(gep_val, elem_value) .add_metadatum(context, span_md_idx); } Ok(array_value) @@ -2043,47 +2036,26 @@ impl<'eng> FnCompiler<'eng> { index_expr: &ty::TyExpression, span_md_idx: Option, ) -> Result { - let array_expr_span = array_expr.span.clone(); - - let array_val = self.compile_expression(context, md_mgr, array_expr)?; + let array_val = self.compile_expression_to_ptr(context, md_mgr, array_expr)?; if array_val.is_diverging(context) { return Ok(array_val); } - let aggregate = if let Some(instruction) = array_val.get_instruction(context) { - 
instruction.get_aggregate(context).ok_or_else(|| { - CompileError::InternalOwned( - format!( - "Unsupported instruction as array value for index expression. \ - {instruction:?}" - ), - array_expr_span, + // Get the array type and confirm it's an array. + let array_type = array_val + .get_type(context) + .and_then(|ty| ty.get_pointee_type(context)) + .and_then(|ty| ty.is_array(context).then_some(ty)) + .ok_or_else(|| { + CompileError::Internal( + "Unsupported array value for index expression.", + array_expr.span.clone(), ) - }) - } else if let Some((agg, _)) = array_val - .get_argument_type_and_byref(context) - .filter(|(ty, _)| ty.is_array(context)) - { - Ok(agg) - } else if let Some(Constant { ty: agg, .. }) = array_val - .get_constant(context) - .filter(|c| c.ty.is_array(context)) - { - Ok(*agg) - } else if let Some(Constant { ty: agg, .. }) = array_val - .get_configurable(context) - .filter(|c| c.ty.is_array(context)) - { - Ok(*agg) - } else { - Err(CompileError::InternalOwned( - "Unsupported array value for index expression.".to_owned(), - array_expr_span, - )) - }?; + })?; let index_expr_span = index_expr.span.clone(); + // Perform a bounds check if the array index is a constant int. if let Ok(Constant { value: ConstantValue::Uint(constant_value), .. @@ -2096,7 +2068,7 @@ impl<'eng> FnCompiler<'eng> { Some(self), index_expr, ) { - let count = aggregate.get_array_len(context).unwrap(); + let count = array_type.get_array_len(context).unwrap(); if constant_value >= count { return Err(CompileError::ArrayOutOfBounds { index: constant_value, @@ -2106,15 +2078,22 @@ impl<'eng> FnCompiler<'eng> { } } - let index_val = self.compile_expression(context, md_mgr, index_expr)?; + let index_val = self.compile_expression_to_value(context, md_mgr, index_expr)?; if index_val.is_diverging(context) { return Ok(index_val); } + let elem_type = array_type.get_array_elem_type(context).ok_or_else(|| { + CompileError::Internal( + "Array type has alread confirmed to be an array. 
Getting elem type can't fail.", + array_expr.span.clone(), + ) + })?; + Ok(self .current_block .ins(context) - .extract_element(array_val, aggregate, index_val) + .get_elem_ptr(array_val, elem_type, vec![index_val]) .add_metadatum(context, span_md_idx)) } @@ -2128,46 +2107,62 @@ impl<'eng> FnCompiler<'eng> { // NOTE: This is a struct instantiation with initialisers for each field of a named struct. // We don't know the actual type of the struct, but the AST guarantees that the fields are // in the declared order (regardless of how they are initialised in source) so we can - // create an aggregate with the field types to construct the struct value. + // create a struct with the field types. // Compile each of the values for field initialisers, calculate their indices and also // gather their types with which to make an aggregate. - let mut inserted_values_indices = Vec::with_capacity(fields.len()); + let mut insert_values = Vec::with_capacity(fields.len()); let mut field_types = Vec::with_capacity(fields.len()); - for (insert_idx, struct_field) in fields.iter().enumerate() { - let field_ty = struct_field.value.return_type; - let insert_val = self.compile_expression(context, md_mgr, &struct_field.value)?; + for struct_field in fields.iter() { + let insert_val = + self.compile_expression_to_value(context, md_mgr, &struct_field.value)?; if insert_val.is_diverging(context) { return Ok(insert_val); } - inserted_values_indices.push((insert_val, insert_idx as u64)); - field_types.push(field_ty); + insert_values.push(insert_val); + + field_types.push(convert_resolved_typeid_no_span( + self.type_engine, + self.decl_engine, + context, + &struct_field.value.return_type, + )?); } - // Start with a temporary empty struct and then fill in the values. - let aggregate = - get_aggregate_for_types(self.type_engine, self.decl_engine, context, &field_types)?; + // Create the struct. 
+ let struct_type = Type::new_struct(context, field_types.clone()); let temp_name = self.lexical_map.insert_anon(); let struct_var = self .function - .new_local_var(context, temp_name, aggregate, None) + .new_local_var(context, temp_name, struct_type, None) .map_err(|ir_error| CompileError::InternalOwned(ir_error.to_string(), Span::dummy()))?; - let agg_value = self + let struct_val = self .current_block .ins(context) .get_local(struct_var) .add_metadatum(context, span_md_idx); - Ok(inserted_values_indices.into_iter().fold( - agg_value, - |agg_value, (insert_val, insert_idx)| { + // Fill it in. + insert_values + .into_iter() + .zip(field_types.into_iter()) + .enumerate() + .for_each(|(insert_idx, (insert_val, field_type))| { + let gep_val = self.current_block.ins(context).get_elem_ptr_with_idx( + struct_val, + field_type, + insert_idx as u64, + ); + self.current_block .ins(context) - .insert_value(agg_value, aggregate, insert_val, vec![insert_idx]) - .add_metadatum(context, span_md_idx) - }, - )) + .store(gep_val, insert_val) + .add_metadatum(context, span_md_idx); + }); + + // Return the pointer. + Ok(struct_val) } fn compile_struct_field_expr( @@ -2179,65 +2174,50 @@ impl<'eng> FnCompiler<'eng> { ast_field: &ty::TyStructField, span_md_idx: Option, ) -> Result { - let ast_struct_expr_span = ast_struct_expr.span.clone(); - let struct_val = self.compile_expression(context, md_mgr, ast_struct_expr)?; - let aggregate = if let Some(instruction) = struct_val.get_instruction(context) { - instruction.get_aggregate(context).ok_or_else(|| { - CompileError::InternalOwned(format!( - "Unsupported instruction as struct value for field expression. {instruction:?}"), - ast_struct_expr_span) - }) - } else if let Some((agg, _)) = struct_val - .get_argument_type_and_byref(context) - .filter(|(ty, _)| ty.is_struct(context)) - { - Ok(agg) - } else if let Some(Constant { ty: agg, .. 
}) = struct_val - .get_constant(context) - .filter(|c| c.ty.is_struct(context)) - { - Ok(*agg) - } else if let Some(Constant { ty: agg, .. }) = struct_val - .get_configurable(context) - .filter(|c| c.ty.is_struct(context)) - { - Ok(*agg) - } else { - Err(CompileError::InternalOwned( - "Unsupported struct value for field expression.".to_owned(), - ast_struct_expr_span, - )) - }?; + let struct_val = self.compile_expression_to_ptr(context, md_mgr, ast_struct_expr)?; - let field_kind = ty::ProjectionKind::StructField { - name: ast_field.name.clone(), - }; - let field_idx = match get_struct_name_field_index_and_type( - self.type_engine, - self.decl_engine, - struct_type_id, - field_kind, - ) { - None => Err(CompileError::Internal( + // Get the struct type info, with field names. + let TypeInfo::Struct(decl_ref) = self.type_engine.get_unaliased(struct_type_id) else { + return Err(CompileError::Internal( "Unknown struct in field expression.", ast_field.span.clone(), - )), - Some((struct_name, field_idx_and_type_opt)) => match field_idx_and_type_opt { - None => Err(CompileError::InternalOwned( + )); + }; + let crate::language::ty::TyStructDecl { + call_path: struct_call_path, + fields: struct_fields, + .. + } = self.decl_engine.get_struct(&decl_ref); + + // Search for the index to the field name we're after, and its type id. 
+ let (field_idx, field_type_id) = struct_fields + .iter() + .enumerate() + .find(|(_, field)| field.name == ast_field.name) + .map(|(idx, field)| (idx as u64, field.type_argument.type_id)) + .ok_or_else(|| { + CompileError::InternalOwned( format!( - "Unknown field name '{}' for struct '{struct_name}' in field expression.", + "Unknown field name '{}' for struct '{}' in field expression.", + struct_call_path.suffix.as_str(), ast_field.name ), ast_field.span.clone(), - )), - Some((field_idx, _field_type)) => Ok(field_idx), - }, - }?; + ) + })?; + + let field_type = convert_resolved_typeid( + self.type_engine, + self.decl_engine, + context, + &field_type_id, + &ast_field.span, + )?; Ok(self .current_block .ins(context) - .extract_value(struct_val, aggregate, vec![field_idx]) + .get_elem_ptr_with_idx(struct_val, field_type, field_idx) .add_metadatum(context, span_md_idx)) } @@ -2256,7 +2236,7 @@ impl<'eng> FnCompiler<'eng> { // we could potentially use the wrong aggregate with the same name, different module... // dunno. 
let span_md_idx = md_mgr.span_to_md(context, &enum_decl.span); - let aggregate = create_enum_aggregate( + let enum_type = create_tagged_union_type( self.type_engine, self.decl_engine, context, @@ -2269,38 +2249,51 @@ impl<'eng> FnCompiler<'eng> { let temp_name = self.lexical_map.insert_anon(); let enum_var = self .function - .new_local_var(context, temp_name, aggregate, None) + .new_local_var(context, temp_name, enum_type, None) .map_err(|ir_error| CompileError::InternalOwned(ir_error.to_string(), Span::dummy()))?; - let enum_val = self + let enum_ptr = self .current_block .ins(context) .get_local(enum_var) .add_metadatum(context, span_md_idx); - let agg_value = self + let u64_ty = Type::get_uint64(context); + let tag_gep_val = self .current_block .ins(context) - .insert_value(enum_val, aggregate, tag_value, vec![0]) + .get_elem_ptr_with_idx(enum_ptr, u64_ty, 0) + .add_metadatum(context, span_md_idx); + self.current_block + .ins(context) + .store(tag_gep_val, tag_value) .add_metadatum(context, span_md_idx); - // If the struct representing the enum has only one field, then that field is basically the - // tag and all the variants must have unit types, hence the absence of the union. - // Therefore, there is no need for another `insert_value` instruction here. - let field_tys = aggregate.get_field_types(context); - Ok(if field_tys.len() == 1 { - agg_value - } else { - match &contents { - None => agg_value, - Some(te) => { - // Insert the value too. - let contents_value = self.compile_expression(context, md_mgr, te)?; - self.current_block - .ins(context) - .insert_value(agg_value, aggregate, contents_value, vec![1]) - .add_metadatum(context, span_md_idx) - } - } - }) + // If the struct representing the enum has only one field, then that field is the tag and + // all the variants must have unit types, hence the absence of the union. Therefore, there + // is no need for another `store` instruction here. 
+ let field_tys = enum_type.get_field_types(context); + if field_tys.len() != 1 && contents.is_some() { + // Insert the value too. + let contents_value = + self.compile_expression_to_value(context, md_mgr, contents.unwrap())?; + let contents_type = contents_value.get_type(context).ok_or_else(|| { + CompileError::Internal( + "Unable to get type for enum contents.", + enum_decl.span.clone(), + ) + })?; + let gep_val = self + .current_block + .ins(context) + .get_elem_ptr_with_idcs(enum_ptr, contents_type, &[1, tag as u64]) + .add_metadatum(context, span_md_idx); + self.current_block + .ins(context) + .store(gep_val, contents_value) + .add_metadatum(context, span_md_idx); + } + + // Return the pointer. + Ok(enum_ptr) } fn compile_tuple_expr( @@ -2324,7 +2317,7 @@ impl<'eng> FnCompiler<'eng> { context, &field_expr.return_type, )?; - let init_value = self.compile_expression(context, md_mgr, field_expr)?; + let init_value = self.compile_expression_to_value(context, md_mgr, field_expr)?; if init_value.is_diverging(context) { return Ok(init_value); } @@ -2332,29 +2325,37 @@ impl<'eng> FnCompiler<'eng> { init_types.push(init_type); } - let aggregate = Type::new_struct(context, init_types); + let tuple_type = Type::new_struct(context, init_types.clone()); let temp_name = self.lexical_map.insert_anon(); let tuple_var = self .function - .new_local_var(context, temp_name, aggregate, None) + .new_local_var(context, temp_name, tuple_type, None) .map_err(|ir_error| { CompileError::InternalOwned(ir_error.to_string(), Span::dummy()) })?; - let agg_value = self + let tuple_val = self .current_block .ins(context) .get_local(tuple_var) .add_metadatum(context, span_md_idx); - Ok(init_values.into_iter().enumerate().fold( - agg_value, - |agg_value, (insert_idx, insert_val)| { + init_values + .into_iter() + .zip(init_types.into_iter()) + .enumerate() + .for_each(|(insert_idx, (field_val, field_type))| { + let gep_val = self + .current_block + .ins(context) + 
.get_elem_ptr_with_idx(tuple_val, field_type, insert_idx as u64) + .add_metadatum(context, span_md_idx); self.current_block .ins(context) - .insert_value(agg_value, aggregate, insert_val, vec![insert_idx as u64]) - .add_metadatum(context, span_md_idx) - }, - )) + .store(gep_val, field_val) + .add_metadatum(context, span_md_idx); + }); + + Ok(tuple_val) } } @@ -2367,27 +2368,29 @@ impl<'eng> FnCompiler<'eng> { idx: usize, span: Span, ) -> Result { - let tuple_value = self.compile_expression(context, md_mgr, tuple)?; - let ty = convert_resolved_typeid( + let tuple_value = self.compile_expression_to_ptr(context, md_mgr, tuple)?; + let tuple_type = convert_resolved_typeid( self.type_engine, self.decl_engine, context, &tuple_type, &span, )?; - if ty.is_struct(context) { - let span_md_idx = md_mgr.span_to_md(context, &span); - Ok(self - .current_block - .ins(context) - .extract_value(tuple_value, ty, vec![idx as u64]) - .add_metadatum(context, span_md_idx)) - } else { - Err(CompileError::Internal( - "Invalid (non-aggregate?) tuple type for TupleElemAccess.", - span, - )) - } + tuple_type + .get_field_type(context, idx as u64) + .map(|field_type| { + let span_md_idx = md_mgr.span_to_md(context, &span); + self.current_block + .ins(context) + .get_elem_ptr_with_idx(tuple_value, field_type, idx as u64) + .add_metadatum(context, span_md_idx) + }) + .ok_or_else(|| { + CompileError::Internal( + "Invalid (non-aggregate?) tuple type for TupleElemAccess.", + span, + ) + }) } fn compile_storage_access( @@ -2441,9 +2444,31 @@ impl<'eng> FnCompiler<'eng> { }| { // Take the optional initialiser, map it to an Option>, // transpose that to Result> and map that to an AsmArg. + // + // Here we need to compile based on the Sway 'copy-type' vs 'ref-type' since + // ASM args aren't explicitly typed, and if we send in a temporary it might + // be mutated and then discarded. It *must* be a ptr to *the* ref-type value, + // *or* the value of the copy-type value. 
initializer .as_ref() - .map(|init_expr| self.compile_expression(context, md_mgr, init_expr)) + .map(|init_expr| { + self.compile_expression(context, md_mgr, init_expr) + .map(|init_val| { + if init_val + .get_type(context) + .map_or(false, |ty| ty.is_ptr(context)) + && self + .type_engine + .get_unaliased(init_expr.return_type) + .is_copy_type() + { + // It's a pointer to a copy type. We need to derefence it. + self.current_block.ins(context).load(init_val) + } else { + init_val + } + }) + }) .transpose() .map(|init| AsmArg { name: name.clone(), @@ -2493,111 +2518,119 @@ impl<'eng> FnCompiler<'eng> { ty: &Type, span_md_idx: Option, ) -> Result { - match ty { - ty if ty.is_struct(context) => { - let temp_name = self.lexical_map.insert_anon(); - let struct_var = self - .function - .new_local_var(context, temp_name, *ty, None) - .map_err(|ir_error| { - CompileError::InternalOwned(ir_error.to_string(), Span::dummy()) - })?; - let mut struct_val = self - .current_block - .ins(context) - .get_local(struct_var) - .add_metadatum(context, span_md_idx); - - let fields = ty.get_field_types(context); - for (field_idx, field_type) in fields.into_iter().enumerate() { - let field_idx = field_idx as u64; - - // Recurse. The base case is for primitive types that fit in a single storage slot. 
- let mut new_indices = indices.to_owned(); - new_indices.push(field_idx); + if ty.is_struct(context) { + let temp_name = self.lexical_map.insert_anon(); + let struct_var = self + .function + .new_local_var(context, temp_name, *ty, None) + .map_err(|ir_error| { + CompileError::InternalOwned(ir_error.to_string(), Span::dummy()) + })?; + let struct_val = self + .current_block + .ins(context) + .get_local(struct_var) + .add_metadatum(context, span_md_idx); - let val_to_insert = self.compile_storage_read( - context, - _md_mgr, - ix, - &new_indices, - &field_type, - span_md_idx, - )?; + let fields = ty.get_field_types(context); + for (field_idx, field_type) in fields.into_iter().enumerate() { + let field_idx = field_idx as u64; - // Insert the loaded value to the aggregate at the given index - struct_val = self - .current_block - .ins(context) - .insert_value(struct_val, *ty, val_to_insert, vec![field_idx]) - .add_metadatum(context, span_md_idx); - } - Ok(struct_val) - } - _ => { - let storage_key = get_storage_key(ix, indices); + // Recurse. The base case is for primitive types that fit in a single storage slot. 
+ let mut new_indices = indices.to_owned(); + new_indices.push(field_idx); - // New name for the key - let mut key_name = format!("{}{}", "key_for_", ix.to_usize()); - for ix in indices { - key_name = format!("{key_name}_{ix}"); - } - let alias_key_name = self.lexical_map.insert(key_name.as_str().to_owned()); - - // Local pointer for the key - let key_var = self - .function - .new_local_var(context, alias_key_name, Type::get_b256(context), None) - .map_err(|ir_error| { - CompileError::InternalOwned(ir_error.to_string(), Span::dummy()) - })?; - - // Const value for the key from the hash - let const_key = - convert_literal_to_value(context, &Literal::B256(storage_key.into())) - .add_metadatum(context, span_md_idx); + let val_to_insert = self.compile_storage_read( + context, + _md_mgr, + ix, + &new_indices, + &field_type, + span_md_idx, + )?; - // Convert the key pointer to a value using get_ptr - let key_val = self + // Insert the loaded value to the aggregate at the given index + let gep_val = self .current_block .ins(context) - .get_local(key_var) + .get_elem_ptr_with_idx(struct_val, field_type, field_idx) .add_metadatum(context, span_md_idx); - - // Store the const hash value to the key pointer value self.current_block .ins(context) - .store(key_val, const_key) + .store(gep_val, val_to_insert) .add_metadatum(context, span_md_idx); + } - match ty.get_content(context) { - TypeContent::Array(..) 
=> Err(CompileError::Internal( - "Arrays in storage have not been implemented yet.", - Span::dummy(), - )), - TypeContent::Slice => Err(CompileError::Internal( - "Slices in storage have not been implemented yet.", - Span::dummy(), - )), - TypeContent::B256 => { - self.compile_b256_storage_read(context, ix, indices, &key_val, span_md_idx) - } - TypeContent::Bool | TypeContent::Uint(_) => { - self.compile_uint_or_bool_storage_read(context, &key_val, ty, span_md_idx) - } - TypeContent::String(_) | TypeContent::Union(_) => self - .compile_union_or_string_storage_read( - context, - ix, - indices, - &key_val, - ty, - span_md_idx, - ), - TypeContent::Struct(_) => unreachable!("structs are already handled!"), - TypeContent::Unit => { - Ok(Constant::get_unit(context).add_metadatum(context, span_md_idx)) - } + Ok(self + .current_block + .ins(context) + .load(struct_val) + .add_metadatum(context, span_md_idx)) + } else { + let storage_key = get_storage_key(ix, indices); + + // New name for the key + let mut key_name = format!("{}{}", "key_for_", ix.to_usize()); + for ix in indices { + key_name = format!("{key_name}_{ix}"); + } + let alias_key_name = self.lexical_map.insert(key_name.as_str().to_owned()); + + // Local pointer for the key + let key_var = self + .function + .new_local_var(context, alias_key_name, Type::get_b256(context), None) + .map_err(|ir_error| { + CompileError::InternalOwned(ir_error.to_string(), Span::dummy()) + })?; + + // Const value for the key from the hash + let const_key = convert_literal_to_value(context, &Literal::B256(storage_key.into())) + .add_metadatum(context, span_md_idx); + + let key_ptr = self + .current_block + .ins(context) + .get_local(key_var) + .add_metadatum(context, span_md_idx); + + // Store the const hash value to the key pointer value + self.current_block + .ins(context) + .store(key_ptr, const_key) + .add_metadatum(context, span_md_idx); + + match ty.get_content(context) { + TypeContent::Array(..) 
=> Err(CompileError::Internal( + "Arrays in storage have not been implemented yet.", + Span::dummy(), + )), + TypeContent::Slice => Err(CompileError::Internal( + "Slices in storage are not valid.", + Span::dummy(), + )), + TypeContent::Pointer(_) => Err(CompileError::Internal( + "Pointers in storage are not valid.", + Span::dummy(), + )), + TypeContent::B256 => { + self.compile_b256_storage_read(context, ix, indices, &key_ptr, span_md_idx) + } + TypeContent::Bool | TypeContent::Uint(_) => { + self.compile_uint_or_bool_storage_read(context, &key_ptr, ty, span_md_idx) + } + TypeContent::String(_) | TypeContent::Union(_) => self + .compile_union_or_string_storage_read( + context, + ix, + indices, + &key_ptr, + ty, + span_md_idx, + ), + TypeContent::Struct(_) => unreachable!("structs are already handled!"), + TypeContent::Unit => { + Ok(Constant::get_unit(context).add_metadatum(context, span_md_idx)) } } } @@ -2607,7 +2640,6 @@ impl<'eng> FnCompiler<'eng> { fn compile_storage_write( &mut self, context: &mut Context, - _md_mgr: &mut MetadataManager, ix: &StateIndex, indices: &[u64], ty: &Type, @@ -2616,6 +2648,22 @@ impl<'eng> FnCompiler<'eng> { ) -> Result<(), CompileError> { match ty { ty if ty.is_struct(context) => { + // Create a temporary local struct, store the RHS to it, and read each field from + // there. 
+ let temp_name = self.lexical_map.insert_anon(); + let tmp_struct_var = self + .function + .new_local_var(context, temp_name, *ty, None) + .map_err(|ir_error| { + CompileError::InternalOwned(ir_error.to_string(), Span::dummy()) + })?; + let tmp_struct_val = self + .current_block + .ins(context) + .get_local(tmp_struct_var) + .add_metadatum(context, span_md_idx); + self.current_block.ins(context).store(tmp_struct_val, rhs); + let fields = ty.get_field_types(context); for (field_idx, field_type) in fields.into_iter().enumerate() { let field_idx = field_idx as u64; @@ -2625,15 +2673,19 @@ impl<'eng> FnCompiler<'eng> { new_indices.push(field_idx); // Extract the value from the aggregate at the given index + let gep_val = self + .current_block + .ins(context) + .get_elem_ptr_with_idx(tmp_struct_val, field_type, field_idx) + .add_metadatum(context, span_md_idx); let rhs = self .current_block .ins(context) - .extract_value(rhs, *ty, vec![field_idx]) + .load(gep_val) .add_metadatum(context, span_md_idx); self.compile_storage_write( context, - _md_mgr, ix, &new_indices, &field_type, @@ -2685,7 +2737,11 @@ impl<'eng> FnCompiler<'eng> { Span::dummy(), )), TypeContent::Slice => Err(CompileError::Internal( - "Slices in storage have not been implemented yet.", + "Slices in storage are not valid.", + Span::dummy(), + )), + TypeContent::Pointer(_) => Err(CompileError::Internal( + "Pointers in storage are not valid.", Span::dummy(), )), TypeContent::B256 => self.compile_b256_storage_write( @@ -2783,7 +2839,7 @@ impl<'eng> FnCompiler<'eng> { .map_err(|ir_error| CompileError::InternalOwned(ir_error.to_string(), Span::dummy()))?; // Convert the local pointer created to a value using get_ptr - let local_val = self + let local_ptr = self .current_block .ins(context) .get_local(local_var) @@ -2792,8 +2848,14 @@ impl<'eng> FnCompiler<'eng> { let one_value = convert_literal_to_value(context, &Literal::U64(1)); self.current_block .ins(context) - .state_load_quad_word(local_val, 
*key_ptr_val, one_value) + .state_load_quad_word(local_ptr, *key_ptr_val, one_value) + .add_metadatum(context, span_md_idx); + let local_val = self + .current_block + .ins(context) + .load(local_ptr) .add_metadatum(context, span_md_idx); + Ok(local_val) } @@ -2849,7 +2911,7 @@ impl<'eng> FnCompiler<'eng> { ix: &StateIndex, indices: &[u64], key_val: &Value, - r#type: &Type, + ty: &Type, span_md_idx: Option, ) -> Result { // First, create a name for the value to load from or store to @@ -2866,7 +2928,7 @@ impl<'eng> FnCompiler<'eng> { // Create an array of `b256` that will hold the value to store into storage // or the value loaded from storage. The array has to fit the whole type. - let number_of_elements = (ir_type_size_in_bytes(context, r#type) + 31) / 32; + let number_of_elements = (ir_type_size_in_bytes(context, ty) + 31) / 32; let b256_array_type = Type::new_array(context, Type::get_b256(context), number_of_elements); // Local pointer to hold the array of b256s @@ -2881,12 +2943,13 @@ impl<'eng> FnCompiler<'eng> { .ins(context) .get_local(local_var) .add_metadatum(context, span_md_idx); + let ptr_ty = Type::new_ptr(context, *ty); let final_val = self .current_block .ins(context) - .cast_ptr(local_val, *r#type, 0) + .cast_ptr(local_val, ptr_ty) .add_metadatum(context, span_md_idx); - let b256_ty = Type::get_b256(context); + let b256_ptr_ty = Type::new_ptr(context, Type::get_b256(context)); if number_of_elements > 0 { // Get the b256 from the array at index iter @@ -2898,7 +2961,7 @@ impl<'eng> FnCompiler<'eng> { let indexed_value_val_b256 = self .current_block .ins(context) - .cast_ptr(value_val_b256, b256_ty, 0) + .cast_ptr(value_val_b256, b256_ptr_ty) .add_metadatum(context, span_md_idx); let count_value = convert_literal_to_value(context, &Literal::U64(number_of_elements)); @@ -2908,7 +2971,11 @@ impl<'eng> FnCompiler<'eng> { .add_metadatum(context, span_md_idx); } - Ok(final_val) + Ok(self + .current_block + .ins(context) + .load(final_val) + 
.add_metadatum(context, span_md_idx)) } #[allow(clippy::too_many_arguments)] @@ -2918,7 +2985,7 @@ impl<'eng> FnCompiler<'eng> { ix: &StateIndex, indices: &[u64], key_val: &Value, - r#type: &Type, + ty: &Type, rhs: Value, span_md_idx: Option, ) -> Result<(), CompileError> { @@ -2936,7 +3003,7 @@ impl<'eng> FnCompiler<'eng> { // Create an array of `b256` that will hold the value to store into storage // or the value loaded from storage. The array has to fit the whole type. - let number_of_elements = (ir_type_size_in_bytes(context, r#type) + 31) / 32; + let number_of_elements = (ir_type_size_in_bytes(context, ty) + 31) / 32; let b256_array_type = Type::new_array(context, Type::get_b256(context), number_of_elements); // Local pointer to hold the array of b256s @@ -2952,10 +3019,11 @@ impl<'eng> FnCompiler<'eng> { .ins(context) .get_local(local_var) .add_metadatum(context, span_md_idx); + let ptr_ty = Type::new_ptr(context, *ty); let final_val = self .current_block .ins(context) - .cast_ptr(local_val, *r#type, 0) + .cast_ptr(local_val, ptr_ty) .add_metadatum(context, span_md_idx); // Store the value to the local pointer created for rhs @@ -2964,7 +3032,7 @@ impl<'eng> FnCompiler<'eng> { .store(final_val, rhs) .add_metadatum(context, span_md_idx); - let b256_ty = Type::get_b256(context); + let b256_ptr_ty = Type::new_ptr(context, Type::get_b256(context)); if number_of_elements > 0 { // Get the b256 from the array at index iter let value_ptr_val_b256 = self @@ -2975,7 +3043,7 @@ impl<'eng> FnCompiler<'eng> { let indexed_value_ptr_val_b256 = self .current_block .ins(context) - .cast_ptr(value_ptr_val_b256, b256_ty, 0) + .cast_ptr(value_ptr_val_b256, b256_ptr_ty) .add_metadatum(context, span_md_idx); // Finally, just call state_load_quad_word/state_store_quad_word diff --git a/sway-core/src/ir_generation/types.rs b/sway-core/src/ir_generation/types.rs index 3d1f2889310..5e124182fc2 100644 --- a/sway-core/src/ir_generation/types.rs +++ b/sway-core/src/ir_generation/types.rs 
@@ -11,7 +11,7 @@ use sway_error::error::CompileError; use sway_ir::{Context, Type}; use sway_types::span::Spanned; -pub(super) fn create_enum_aggregate( +pub(super) fn create_tagged_union_type( type_engine: &TypeEngine, decl_engine: &DeclEngine, context: &mut Context, @@ -69,7 +69,7 @@ pub(super) fn create_array_aggregate( Ok(Type::new_array(context, element_type, count)) } -pub(super) fn get_aggregate_for_types( +pub(super) fn get_struct_for_types( type_engine: &TypeEngine, decl_engine: &DeclEngine, context: &mut Context, diff --git a/sway-core/src/language/ty/expression/expression.rs b/sway-core/src/language/ty/expression/expression.rs index cf3dede615d..914153f37df 100644 --- a/sway-core/src/language/ty/expression/expression.rs +++ b/sway-core/src/language/ty/expression/expression.rs @@ -183,7 +183,10 @@ impl CollectTypesMetadata for TyExpression { errors )); } - Array { contents } => { + Array { + elem_type: _, + contents, + } => { for content in contents.iter() { res.append(&mut check!( content.collect_types_metadata(ctx), diff --git a/sway-core/src/language/ty/expression/expression_variant.rs b/sway-core/src/language/ty/expression/expression_variant.rs index adad3bf2c19..1ef1aafe074 100644 --- a/sway-core/src/language/ty/expression/expression_variant.rs +++ b/sway-core/src/language/ty/expression/expression_variant.rs @@ -43,6 +43,7 @@ pub enum TyExpressionVariant { fields: Vec, }, Array { + elem_type: TypeId, contents: Vec, }, ArrayIndex { @@ -194,9 +195,11 @@ impl PartialEqWithEngines for TyExpressionVariant { ( Self::Array { contents: l_contents, + .. }, Self::Array { contents: r_contents, + .. 
}, ) => l_contents.eq(r_contents, engines), ( @@ -420,7 +423,10 @@ impl HashWithEngines for TyExpressionVariant { Self::Tuple { fields } => { fields.hash(state, engines); } - Self::Array { contents } => { + Self::Array { + contents, + elem_type: _, + } => { contents.hash(state, engines); } Self::ArrayIndex { prefix, index } => { @@ -587,9 +593,15 @@ impl SubstTypes for TyExpressionVariant { Tuple { fields } => fields .iter_mut() .for_each(|x| x.subst(type_mapping, engines)), - Array { contents } => contents - .iter_mut() - .for_each(|x| x.subst(type_mapping, engines)), + Array { + ref mut elem_type, + contents, + } => { + elem_type.subst(type_mapping, engines); + contents + .iter_mut() + .for_each(|x| x.subst(type_mapping, engines)) + } ArrayIndex { prefix, index } => { (*prefix).subst(type_mapping, engines); (*index).subst(type_mapping, engines); @@ -719,9 +731,15 @@ impl ReplaceSelfType for TyExpressionVariant { Tuple { fields } => fields .iter_mut() .for_each(|x| x.replace_self_type(engines, self_type)), - Array { contents } => contents - .iter_mut() - .for_each(|x| x.replace_self_type(engines, self_type)), + Array { + ref mut elem_type, + contents, + } => { + elem_type.replace_self_type(engines, self_type); + contents + .iter_mut() + .for_each(|x| x.replace_self_type(engines, self_type)) + } ArrayIndex { prefix, index } => { (*prefix).replace_self_type(engines, self_type); (*index).replace_self_type(engines, self_type); @@ -846,7 +864,10 @@ impl ReplaceDecls for TyExpressionVariant { Tuple { fields } => fields .iter_mut() .for_each(|x| x.replace_decls(decl_mapping, engines)), - Array { contents } => contents + Array { + elem_type: _, + contents, + } => contents .iter_mut() .for_each(|x| x.replace_decls(decl_mapping, engines)), ArrayIndex { prefix, index } => { @@ -1106,7 +1127,10 @@ impl TyExpressionVariant { .iter() .flat_map(|expr| expr.gather_return_statements()) .collect(), - TyExpressionVariant::Array { contents } => contents + TyExpressionVariant::Array { 
+ elem_type: _, + contents, + } => contents .iter() .flat_map(|expr| expr.gather_return_statements()) .collect(), diff --git a/sway-core/src/lib.rs b/sway-core/src/lib.rs index b72a124ae6a..b129aaeb90b 100644 --- a/sway-core/src/lib.rs +++ b/sway-core/src/lib.rs @@ -36,7 +36,8 @@ use sway_ast::AttributeDecl; use sway_error::handler::{ErrorEmitted, Handler}; use sway_ir::{ create_o1_pass_group, register_known_passes, Context, Kind, Module, PassManager, - MODULEPRINTER_NAME, + ARGDEMOTION_NAME, CONSTDEMOTION_NAME, DCE_NAME, MEMCPYOPT_NAME, MISCDEMOTION_NAME, + MODULEPRINTER_NAME, RETDEMOTION_NAME, }; use sway_types::constants::DOC_COMMENT_ATTRIBUTE_NAME; use transform::{Attribute, AttributeArg, AttributeKind, AttributesMap}; @@ -366,8 +367,7 @@ pub fn parsed_to_ast( errors.extend(cfa_res.errors); warnings.extend(cfa_res.warnings); - // Evaluate const declarations, - // to allow storage slots initializion with consts. + // Evaluate const declarations, to allow storage slots initialization with consts. let mut ctx = Context::default(); let mut md_mgr = MetadataManager::default(); let module = Module::new(&mut ctx, Kind::Contract); @@ -562,6 +562,29 @@ pub(crate) fn compile_ast_to_ir_to_asm( let mut pass_mgr = PassManager::default(); register_known_passes(&mut pass_mgr); let mut pass_group = create_o1_pass_group(matches!(tree_type, TreeType::Predicate)); + + // Target specific transforms should be moved into something more configured. + if build_config.build_target == BuildTarget::Fuel { + // FuelVM target specific transforms. + // + // Demote large by-value constants, arguments and return values to by-reference values + // using temporaries. + pass_group.append_pass(CONSTDEMOTION_NAME); + pass_group.append_pass(ARGDEMOTION_NAME); + pass_group.append_pass(RETDEMOTION_NAME); + pass_group.append_pass(MISCDEMOTION_NAME); + + // Convert loads and stores to mem_copys where possible.
+ pass_group.append_pass(MEMCPYOPT_NAME); + + // Run a DCE and simplify-cfg to clean up any obsolete instructions. + pass_group.append_pass(DCE_NAME); + // XXX Oh no, if we add simplifycfg here it unearths a bug in the register allocator which + // manifests in the `should_pass/language/while_loops` test. Fixing the register allocator + // is a very high priority but isn't a part of this change. + //pass_group.append_pass(SIMPLIFYCFG_NAME); + } + if build_config.print_ir { pass_group.append_pass(MODULEPRINTER_NAME); } diff --git a/sway-core/src/monomorphize/gather/expression.rs b/sway-core/src/monomorphize/gather/expression.rs index 4dc31f47b6e..0c32717b2c9 100644 --- a/sway-core/src/monomorphize/gather/expression.rs +++ b/sway-core/src/monomorphize/gather/expression.rs @@ -43,7 +43,10 @@ pub(crate) fn gather_from_exp_inner( .iter() .try_for_each(|field| gather_from_exp(ctx.by_ref(), handler, field))?; } - ty::TyExpressionVariant::Array { contents: _ } => { + ty::TyExpressionVariant::Array { + contents: _, + elem_type: _, + } => { todo!(); // contents // .iter() diff --git a/sway-core/src/monomorphize/instruct/expression.rs b/sway-core/src/monomorphize/instruct/expression.rs index fbb4f40901f..8f3af5797f3 100644 --- a/sway-core/src/monomorphize/instruct/expression.rs +++ b/sway-core/src/monomorphize/instruct/expression.rs @@ -43,7 +43,10 @@ pub(crate) fn instruct_exp_inner( .iter() .try_for_each(|field| instruct_exp(ctx.by_ref(), handler, field))?; } - ty::TyExpressionVariant::Array { contents: _ } => { + ty::TyExpressionVariant::Array { + contents: _, + elem_type: _, + } => { todo!(); // contents // .iter() diff --git a/sway-core/src/semantic_analysis/ast_node/declaration/impl_trait.rs b/sway-core/src/semantic_analysis/ast_node/declaration/impl_trait.rs index 06796781db4..1d9ba0a45c0 100644 --- a/sway-core/src/semantic_analysis/ast_node/declaration/impl_trait.rs +++ b/sway-core/src/semantic_analysis/ast_node/declaration/impl_trait.rs @@ -274,7 +274,10 @@ impl 
ty::TyImplTrait { || expr_contains_get_storage_index(decl_engine, expr2, access_span)? } ty::TyExpressionVariant::Tuple { fields: exprvec } - | ty::TyExpressionVariant::Array { contents: exprvec } => { + | ty::TyExpressionVariant::Array { + elem_type: _, + contents: exprvec, + } => { for f in exprvec.iter() { let b = expr_contains_get_storage_index(decl_engine, f, access_span)?; if b { diff --git a/sway-core/src/semantic_analysis/ast_node/expression/typed_expression.rs b/sway-core/src/semantic_analysis/ast_node/expression/typed_expression.rs index 542c3d2eadd..5d8721a4a03 100644 --- a/sway-core/src/semantic_analysis/ast_node/expression/typed_expression.rs +++ b/sway-core/src/semantic_analysis/ast_node/expression/typed_expression.rs @@ -1435,6 +1435,7 @@ impl ty::TyExpression { return ok( ty::TyExpression { expression: ty::TyExpressionVariant::Array { + elem_type: unknown_type, contents: Vec::new(), }, return_type: type_engine.insert( @@ -1496,6 +1497,7 @@ impl ty::TyExpression { ok( ty::TyExpression { expression: ty::TyExpressionVariant::Array { + elem_type, contents: typed_contents, }, return_type: type_engine.insert( diff --git a/sway-core/src/semantic_analysis/cei_pattern_analysis.rs b/sway-core/src/semantic_analysis/cei_pattern_analysis.rs index b285f6fd6c8..1843afa64ea 100644 --- a/sway-core/src/semantic_analysis/cei_pattern_analysis.rs +++ b/sway-core/src/semantic_analysis/cei_pattern_analysis.rs @@ -296,7 +296,11 @@ fn analyze_expression( } set_union(intr_effs, args_effs) } - Tuple { fields: exprs } | Array { contents: exprs } => { + Tuple { fields: exprs } + | Array { + elem_type: _, + contents: exprs, + } => { // assuming left-to-right fields/elements evaluation analyze_expressions(engines, exprs.iter().collect(), block_name, warnings) } @@ -542,9 +546,11 @@ fn effects_of_expression(engines: Engines<'_>, expr: &ty::TyExpression) -> HashS effs.extend(rhs_effs); effs } - Tuple { fields: exprs } | Array { contents: exprs } => { - 
effects_of_expressions(engines, exprs) - } + Tuple { fields: exprs } + | Array { + elem_type: _, + contents: exprs, + } => effects_of_expressions(engines, exprs), StructExpression { fields, .. } => effects_of_struct_expressions(engines, fields), CodeBlock(codeblock) => effects_of_codeblock(engines, codeblock), MatchExp { desugared, .. } => effects_of_expression(engines, desugared), diff --git a/sway-core/src/semantic_analysis/storage_only_types.rs b/sway-core/src/semantic_analysis/storage_only_types.rs index a963a4c6ae8..75d983e6873 100644 --- a/sway-core/src/semantic_analysis/storage_only_types.rs +++ b/sway-core/src/semantic_analysis/storage_only_types.rs @@ -53,7 +53,10 @@ fn expr_validate(engines: Engines<'_>, expr: &ty::TyExpression) -> CompileResult .. }) | ty::TyExpressionVariant::Tuple { fields: exprvec } - | ty::TyExpressionVariant::Array { contents: exprvec } => { + | ty::TyExpressionVariant::Array { + elem_type: _, + contents: exprvec, + } => { for f in exprvec { check!(expr_validate(engines, f), continue, warnings, errors) } diff --git a/sway-core/src/type_system/engine.rs b/sway-core/src/type_system/engine.rs index 31ee7b78fd0..f14d272472d 100644 --- a/sway-core/src/type_system/engine.rs +++ b/sway-core/src/type_system/engine.rs @@ -51,6 +51,17 @@ impl TypeEngine { self.slab.get(id.index()) } + /// Performs a lookup of `id` into the [TypeEngine] recursing when finding a + /// [TypeInfo::Alias]. + pub fn get_unaliased(&self, id: TypeId) -> TypeInfo { + // A slight infinite loop concern if we somehow have self-referential aliases, but that + // shouldn't be possible. + match self.slab.get(id.index()) { + TypeInfo::Alias { ty, .. } => self.get_unaliased(ty.type_id), + ty_info => ty_info, + } + } + /// Denotes the given [TypeId] as being used with storage. 
pub(crate) fn set_type_as_storage_only(&self, id: TypeId) { self.storage_only_types.insert(self.get(id)); diff --git a/sway-core/src/type_system/info.rs b/sway-core/src/type_system/info.rs index 994b7ce0d48..b0283b6e95e 100644 --- a/sway-core/src/type_system/info.rs +++ b/sway-core/src/type_system/info.rs @@ -831,12 +831,22 @@ impl TypeInfo { } pub fn is_copy_type(&self) -> bool { + // XXX This is FuelVM specific. We need to find the users of this method and determine + // whether they're actually asking 'is_aggregate()` or something else. matches!( self, TypeInfo::Boolean | TypeInfo::UnsignedInteger(_) | TypeInfo::RawUntypedPtr ) || self.is_unit() } + pub fn is_aggregate_type(&self) -> bool { + match self { + TypeInfo::Struct { .. } | TypeInfo::Enum { .. } | TypeInfo::Array { .. } => true, + TypeInfo::Tuple { .. } => !self.is_unit(), + _ => false, + } + } + pub(crate) fn apply_type_arguments( self, type_arguments: Vec, diff --git a/sway-ir/src/asm.rs b/sway-ir/src/asm.rs index ba02873ce28..a8754dae7a5 100644 --- a/sway-ir/src/asm.rs +++ b/sway-ir/src/asm.rs @@ -72,6 +72,11 @@ impl AsmBlock { context.asm_blocks[self.0].return_type } + /// Change the [`AsmBlock`] return type. + pub fn set_type(&self, context: &mut Context, new_ret_type: Type) { + context.asm_blocks[self.0].return_type = new_ret_type + } + /// Get a reference to the [`AsmBlockContent`] for this ASM block. pub fn get_content<'a>(&self, context: &'a Context) -> &'a AsmBlockContent { &context.asm_blocks[self.0] diff --git a/sway-ir/src/block.rs b/sway-ir/src/block.rs index 3a4b4da649b..dba996f7c8e 100644 --- a/sway-ir/src/block.rs +++ b/sway-ir/src/block.rs @@ -48,9 +48,6 @@ pub struct BlockArgument { /// idx'th argument of the block. pub idx: usize, pub ty: Type, - - /// Temporary flag to mark an arg as passed by reference until we reintroduce pointers. 
- pub by_ref: bool, } impl BlockArgument { @@ -92,7 +89,7 @@ impl Block { context.blocks[self.0].function } - /// Create a new [`InstructionIterator`] to more easily append instructions to this block. + /// Create a new [`InstructionInserter`] to more easily append instructions to this block. pub fn ins<'a>(&self, context: &'a mut Context) -> InstructionInserter<'a> { InstructionInserter::new(context, *self) } @@ -103,6 +100,14 @@ impl Block { context.blocks[self.0].label.clone() } + /// Set the label of this block. If the label isn't unique it will be made so. + pub fn set_label(&self, context: &mut Context, new_label: Option