diff --git a/sway-core/src/asm_generation/abstract_instruction_set.rs b/sway-core/src/asm_generation/abstract_instruction_set.rs index 96fa185af10..c3ee9b29712 100644 --- a/sway-core/src/asm_generation/abstract_instruction_set.rs +++ b/sway-core/src/asm_generation/abstract_instruction_set.rs @@ -1,11 +1,12 @@ use crate::{ - asm_generation::{register_allocator, DataSection, InstructionSet, RegisterSequencer}, + asm_generation::{register_allocator, AllocatedAbstractInstructionSet, RegisterSequencer}, asm_lang::{ - allocated_ops::AllocatedOp, Label, Op, OrganizationalOp, RealizedOp, VirtualImmediate12, - VirtualImmediate18, VirtualImmediate24, VirtualOp, + allocated_ops::AllocatedOp, AllocatedAbstractOp, Op, OrganizationalOp, RealizedOp, + VirtualOp, VirtualRegister, }, }; -use std::{collections::HashMap, fmt}; + +use std::{collections::BTreeSet, fmt}; use either::Either; @@ -17,184 +18,102 @@ pub struct AbstractInstructionSet { } impl AbstractInstructionSet { + pub(crate) fn optimize(self) -> AbstractInstructionSet { + self.remove_sequential_jumps() + .remove_redundant_moves() + .remove_unused_ops() + } + /// Removes any jumps that jump to the subsequent line - pub(crate) fn remove_sequential_jumps(&self) -> AbstractInstructionSet { - let mut buf = vec![]; - for i in 0..self.ops.len() - 1 { - if let Op { - opcode: Either::Right(OrganizationalOp::Jump(ref label)), - .. - } = self.ops[i] - { - if let Op { - opcode: Either::Right(OrganizationalOp::Label(ref label2)), - .. - } = self.ops[i + 1] - { - if label == label2 { - // this is a jump to the next line - // omit these by doing nothing - continue; - } - } - } - buf.push(self.ops[i].clone()); + fn remove_sequential_jumps(mut self) -> AbstractInstructionSet { + let dead_jumps: Vec<_> = self + .ops + .windows(2) + .enumerate() + .filter_map(|(idx, ops)| match (&ops[0].opcode, &ops[1].opcode) { + ( + Either::Right(OrganizationalOp::Jump(dst_label)), + Either::Right(OrganizationalOp::Label(label)), + ) if dst_label == label => Some(idx), + _otherwise => None, + }) + .collect(); + + // Replace the dead jumps with NOPs, as it's cheaper. + for idx in dead_jumps { + self.ops[idx] = Op { + opcode: Either::Left(VirtualOp::NOOP), + comment: "removed redundant JUMP".into(), + owning_span: None, + }; } - // the last item cannot sequentially jump by definition so we add it in here - if let Some(x) = self.ops.last() { - buf.push(x.clone()) - }; - - // scan through the jumps and remove any labels that are unused - // this could of course be N instead of 2N if i did this in the above for loop. - // However, the sweep for unused labels is inevitable regardless of the above phase - // so might as well do it here. - let mut buf2 = vec![]; - for op in &buf { - match op.opcode { - Either::Right(OrganizationalOp::Label(ref label)) => { - if label_is_used(&buf, label) { - buf2.push(op.clone()); + + self + } + + fn remove_redundant_moves(mut self) -> AbstractInstructionSet { + // This has a lot of room for improvement. + // + // For now it is just removing MOVEs to registers which are _never_ used. It doesn't + // analyse control flow or other redundancies. Some obvious improvements are: + // + // - Perform a control flow analysis to remove MOVEs to registers which are not used + // _after_ the MOVE. + // + // - Remove the redundant use of temporaries. E.g.: + // MOVE t, a MOVE b, a + // MOVE b, t => USE b + // USE b + + loop { + // Gather all the uses for each register. 
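+            // (`use_registers()` collects only registers an op *reads*, so the
+            // destination of a MOVE that is never subsequently read will not
+            // appear in this set.)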
+ let uses: BTreeSet<&VirtualRegister> = + self.ops.iter().fold(BTreeSet::new(), |mut acc, op| { + acc.append(&mut op.use_registers()); + acc + }); + + // Loop again and find MOVEs which have a non-constant destination which is never used. + let mut dead_moves = Vec::new(); + for (idx, op) in self.ops.iter().enumerate() { + if let Either::Left(VirtualOp::MOVE( + dst_reg @ VirtualRegister::Virtual(_), + _src_reg, + )) = &op.opcode + { + if !uses.contains(dst_reg) { + dead_moves.push(idx); } } - _ => buf2.push(op.clone()), } - } - AbstractInstructionSet { ops: buf2 } - } + if dead_moves.is_empty() { + break; + } - /// Runs two passes -- one to get the instruction offsets of the labels - /// and one to replace the labels in the organizational ops - pub(crate) fn realize_labels( - self, - data_section: &DataSection, - ) -> RealizedAbstractInstructionSet { - let mut label_namespace: HashMap<&Label, u64> = Default::default(); - let mut offset_map = vec![]; - let mut counter = 0; - for op in &self.ops { - offset_map.push(counter); - match op.opcode { - Either::Right(OrganizationalOp::Label(ref lab)) => { - label_namespace.insert(lab, counter); - } - // A special case for LWDataId which may be 1 or 2 ops, depending on the source size. - Either::Left(VirtualOp::LWDataId(_, ref data_id)) => { - let type_of_data = data_section.type_of_data(data_id).expect( - "Internal miscalculation in data section -- data id did not match up to any actual data", - ); - counter += if type_of_data.is_copy_type() { 1 } else { 2 }; - } - // these ops will end up being exactly one op, so the counter goes up one - Either::Right(OrganizationalOp::Jump(..)) - | Either::Right(OrganizationalOp::JumpIfNotEq(..)) - | Either::Right(OrganizationalOp::JumpIfNotZero(..)) - | Either::Left(_) => { - counter += 1; - } - Either::Right(OrganizationalOp::Comment) => (), - Either::Right(OrganizationalOp::DataSectionOffsetPlaceholder) => { - // If the placeholder is 32 bits, this is 1. if 64, this should be 2. We use LW - // to load the data, which loads a whole word, so for now this is 2. - counter += 2 - } + // Replace the dead moves with NOPs, as it's cheaper. 
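+            // (Cheaper than deleting them in place, which would shift the
+            // remaining indices; the NOPs are stripped in a single pass by
+            // remove_unused_ops() below.)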
+        for idx in dead_moves {
+            self.ops[idx] = Op {
+                opcode: Either::Left(VirtualOp::NOOP),
+                comment: "removed redundant MOVE".into(),
+                owning_span: None,
+            };
+        }
+    }

-        let mut realized_ops = vec![];
-        for (
-            ix,
-            Op {
-                opcode,
-                owning_span,
-                comment,
-            },
-        ) in self.ops.clone().into_iter().enumerate()
-        {
-            let offset = offset_map[ix];
-            match opcode {
-                Either::Left(op) => realized_ops.push(RealizedOp {
-                    opcode: op,
-                    owning_span,
-                    comment,
-                    offset,
-                }),
-                Either::Right(org_op) => match org_op {
-                    OrganizationalOp::Jump(ref lab) => {
-                        let imm = VirtualImmediate24::new_unchecked(
-                            *label_namespace.get(lab).unwrap(),
-                            "Programs with more than 2^24 labels are unsupported right now",
-                        );
-                        realized_ops.push(RealizedOp {
-                            opcode: VirtualOp::JI(imm),
-                            owning_span,
-                            comment,
-                            offset,
-                        });
-                    }
-                    OrganizationalOp::JumpIfNotEq(r1, r2, ref lab) => {
-                        let imm = VirtualImmediate12::new_unchecked(
-                            *label_namespace.get(lab).unwrap(),
-                            "Programs with more than 2^12 labels are unsupported right now",
-                        );
-                        realized_ops.push(RealizedOp {
-                            opcode: VirtualOp::JNEI(r1, r2, imm),
-                            owning_span,
-                            comment,
-                            offset,
-                        });
-                    }
-                    OrganizationalOp::JumpIfNotZero(r1, ref lab) => {
-                        let imm = VirtualImmediate18::new_unchecked(
-                            *label_namespace.get(lab).unwrap(),
-                            "Programs with more than 2^18 labels are unsupported right now",
-                        );
-                        realized_ops.push(RealizedOp {
-                            opcode: VirtualOp::JNZI(r1, imm),
-                            owning_span,
-                            comment,
-                            offset,
-                        });
-                    }
-                    OrganizationalOp::DataSectionOffsetPlaceholder => {
-                        realized_ops.push(RealizedOp {
-                            opcode: VirtualOp::DataSectionOffsetPlaceholder,
-                            owning_span: None,
-                            comment: String::new(),
-                            offset,
-                        });
-                    }
-                    OrganizationalOp::Comment => continue,
-                    OrganizationalOp::Label(..) => continue,
-                },
-            };
-        }
-        RealizedAbstractInstructionSet { ops: realized_ops }
+        self
     }
-}

-impl fmt::Display for AbstractInstructionSet {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        write!(
-            f,
-            ".program:\n{}",
-            self.ops
-                .iter()
-                .map(|x| format!("{}", x))
-                .collect::<Vec<String>>()
-                .join("\n")
-        )
-    }
-}
+    fn remove_unused_ops(mut self) -> AbstractInstructionSet {
+        // Just remove NOPs for now.
+        self.ops.retain(|op| match &op.opcode {
+            Either::Left(VirtualOp::NOOP) => false,
+            _otherwise => true,
+        });

-/// "Realized" here refers to labels -- there are no more organizational
-/// ops or labels. In this struct, they are all "realized" to offsets.
-pub struct RealizedAbstractInstructionSet {
-    ops: Vec<RealizedOp>,
-}
+        self
+    }

-impl RealizedAbstractInstructionSet {
     /// Assigns an allocatable register to each virtual register used by some instruction in the
     /// list `self.ops`. The algorithm used is Chaitin's graph-coloring register allocation
     /// algorithm (https://en.wikipedia.org/wiki/Chaitin%27s_algorithm). The individual steps of
@@ -203,7 +122,7 @@ impl RealizedAbstractInstructionSet {
     pub(crate) fn allocate_registers(
         self,
         register_sequencer: &mut RegisterSequencer,
-    ) -> InstructionSet {
+    ) -> AllocatedAbstractInstructionSet {
         // Step 1: Liveness Analysis.
         let live_out = register_allocator::liveness_analysis(&self.ops);
@@ -226,26 +145,69 @@ impl RealizedAbstractInstructionSet {
         // Step 5: Use the stack to assign a register for each virtual register.
         let pool = register_allocator::assign_registers(&mut stack);

-        // Steph 6: Update all instructions to use the resulting register pool.
+        // Step 6: Update all instructions to use the resulting register pool.
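+        // Note the result is still abstract: virtual registers are replaced by
+        // allocated ones here, but labels are only realized later, by
+        // AllocatedAbstractInstructionSet::realize_labels().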
         let mut buf = vec![];
         for op in &reduced_ops {
-            buf.push(AllocatedOp {
-                opcode: op.opcode.allocate_registers(&pool),
+            buf.push(AllocatedAbstractOp {
+                opcode: op.allocate_registers(&pool),
                 comment: op.comment.clone(),
                 owning_span: op.owning_span.clone(),
             })
         }

-        InstructionSet { ops: buf }
+        AllocatedAbstractInstructionSet { ops: buf }
     }
 }

-/// helper function to check if a label is used in a given buffer of ops
-fn label_is_used(buf: &[Op], label: &Label) -> bool {
-    buf.iter().any(|Op { ref opcode, .. }| match opcode {
-        Either::Right(OrganizationalOp::Jump(ref l)) if label == l => true,
-        Either::Right(OrganizationalOp::JumpIfNotEq(_, _, ref l)) if label == l => true,
-        Either::Right(OrganizationalOp::JumpIfNotZero(_, ref l)) if label == l => true,
-        _ => false,
-    })
-}
+impl fmt::Display for AbstractInstructionSet {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            ".program:\n{}",
+            self.ops
+                .iter()
+                .map(|x| format!("{}", x))
+                .collect::<Vec<String>>()
+                .join("\n")
+        )
+    }
+}
+
+/// "Realized" here refers to labels -- there are no more organizational
+/// ops or labels. In this struct, they are all "realized" to offsets.
+pub struct RealizedAbstractInstructionSet {
+    pub(super) ops: Vec<RealizedOp>,
+}
+
+impl RealizedAbstractInstructionSet {
+    pub(crate) fn pad_to_even(self) -> Vec<AllocatedOp> {
+        let mut ops = self
+            .ops
+            .into_iter()
+            .map(
+                |RealizedOp {
+                     opcode,
+                     comment,
+                     owning_span,
+                     offset: _,
+                 }| {
+                    AllocatedOp {
+                        opcode,
+                        comment,
+                        owning_span,
+                    }
+                },
+            )
+            .collect::<Vec<_>>();
+
+        if ops.len() & 1 != 0 {
+            ops.push(AllocatedOp {
+                opcode: crate::asm_lang::allocated_ops::AllocatedOpcode::NOOP,
+                comment: "word-alignment of data section".into(),
+                owning_span: None,
+            });
+        }
+
+        ops
+    }
+}
diff --git a/sway-core/src/asm_generation/allocated_abstract_instruction_set.rs b/sway-core/src/asm_generation/allocated_abstract_instruction_set.rs
new file mode 100644
index 00000000000..39ebf549747
--- /dev/null
+++ b/sway-core/src/asm_generation/allocated_abstract_instruction_set.rs
@@ -0,0 +1,306 @@
+use crate::asm_lang::{
+    allocated_ops::{AllocatedOpcode, AllocatedRegister},
+    AllocatedAbstractOp, ConstantRegister, ControlFlowOp, Label, RealizedOp, VirtualImmediate12,
+    VirtualImmediate18, VirtualImmediate24,
+};
+
+use super::{DataSection, RealizedAbstractInstructionSet};
+
+use sway_types::span::Span;
+
+use std::collections::{BTreeSet, HashMap, HashSet};
+
+use either::Either;
+
+#[derive(Clone)]
+pub struct AllocatedAbstractInstructionSet {
+    pub(crate) ops: Vec<AllocatedAbstractOp>,
+}
+
+impl AllocatedAbstractInstructionSet {
+    /// Replace each PUSHA instruction with stores of all used registers to the stack, and each
+    /// POPA with respective loads from the stack.
+    ///
+    /// Typically there will be only one of each but the code here allows for nested sections or
+    /// even overlapping sections.
+    pub(crate) fn emit_pusha_popa(mut self) -> Self {
+        // Gather the sets of used registers per section. Using a fold here because it's actually
+        // simpler to manage. We use a HashSet to keep track of the active section labels and then
+        // build a HashMap of Label to BTreeSet of registers (a BTreeSet, so the save and restore
+        // order is deterministic).
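+        // E.g., for nested sections
+        //
+        //     PUSHA l0 ; DEF a ; PUSHA l1 ; DEF b ; POPA l1 ; DEF c ; POPA l0
+        //
+        // `b` is recorded in both sets while `a` and `c` end up only in l0's set.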
+        let reg_sets = self
+            .ops
+            .iter()
+            .fold(
+                (HashMap::new(), HashSet::new()),
+                |(mut reg_sets, mut active_sets), op| {
+                    let reg = match &op.opcode {
+                        Either::Right(ControlFlowOp::PushAll(label)) => {
+                            active_sets.insert(*label);
+                            None
+                        }
+                        Either::Right(ControlFlowOp::PopAll(label)) => {
+                            active_sets.remove(label);
+                            None
+                        }
+
+                        Either::Left(alloc_op) => alloc_op.def_registers().into_iter().next(),
+                        Either::Right(ctrl_op) => ctrl_op.def_registers().into_iter().next(),
+                    };
+
+                    if let Some(reg) = reg {
+                        for active_label in active_sets.clone() {
+                            reg_sets
+                                .entry(active_label)
+                                .and_modify(|regs: &mut BTreeSet<AllocatedRegister>| {
+                                    regs.insert(reg.clone());
+                                })
+                                .or_insert_with(|| {
+                                    BTreeSet::from_iter(std::iter::once(reg).cloned())
+                                });
+                        }
+                    }
+
+                    (reg_sets, active_sets)
+                },
+            )
+            .0;
+
+        // Now replace the PUSHA/POPA instructions with STOREs and LOADs.
+        self.ops = self.ops.drain(..).fold(Vec::new(), |mut new_ops, op| {
+            match &op.opcode {
+                Either::Right(ControlFlowOp::PushAll(label)) => {
+                    let regs = reg_sets
+                        .get(label)
+                        .expect("Have collected registers above.")
+                        .iter()
+                        .filter(|reg| matches!(reg, AllocatedRegister::Allocated(_)))
+                        .collect::<Vec<_>>();
+
+                    let stack_use_bytes = regs.len() as u64 * 8;
+                    new_ops.push(AllocatedAbstractOp {
+                        opcode: Either::Left(AllocatedOpcode::MOVE(
+                            AllocatedRegister::Constant(ConstantRegister::Scratch),
+                            AllocatedRegister::Constant(ConstantRegister::StackPointer),
+                        )),
+                        comment: "save base stack value".into(),
+                        owning_span: None,
+                    });
+                    new_ops.push(AllocatedAbstractOp {
+                        opcode: Either::Left(AllocatedOpcode::CFEI(
+                            VirtualImmediate24::new(stack_use_bytes, Span::dummy()).unwrap(),
+                        )),
+                        comment: "reserve space for saved registers".into(),
+                        owning_span: None,
+                    });
+
+                    regs.into_iter().enumerate().for_each(|(idx, reg)| {
+                        let store_op = AllocatedOpcode::SW(
+                            AllocatedRegister::Constant(ConstantRegister::Scratch),
+                            reg.clone(),
+                            VirtualImmediate12::new(idx as u64, Span::dummy()).unwrap(),
+                        );
+                        new_ops.push(AllocatedAbstractOp {
+                            opcode: Either::Left(store_op),
+                            comment: format!("save {}", reg),
+                            owning_span: None,
+                        });
+                    })
+                }
+
+                Either::Right(ControlFlowOp::PopAll(label)) => {
+                    let regs = reg_sets
+                        .get(label)
+                        .expect("Have collected registers above.")
+                        .iter()
+                        .filter(|reg| matches!(reg, AllocatedRegister::Allocated(_)))
+                        .collect::<Vec<_>>();
+
+                    let stack_use_bytes = regs.len() as u64 * 8;
+                    new_ops.push(AllocatedAbstractOp {
+                        opcode: Either::Left(AllocatedOpcode::SUBI(
+                            AllocatedRegister::Constant(ConstantRegister::Scratch),
+                            AllocatedRegister::Constant(ConstantRegister::StackPointer),
+                            VirtualImmediate12::new(stack_use_bytes, Span::dummy()).unwrap(),
+                        )),
+                        comment: "recover base stack value".into(),
+                        owning_span: None,
+                    });
+
+                    regs.into_iter().enumerate().for_each(|(idx, reg)| {
+                        let load_op = AllocatedOpcode::LW(
+                            reg.clone(),
+                            AllocatedRegister::Constant(ConstantRegister::Scratch),
+                            VirtualImmediate12::new(idx as u64, Span::dummy()).unwrap(),
+                        );
+                        new_ops.push(AllocatedAbstractOp {
+                            opcode: Either::Left(load_op),
+                            comment: format!("restore {}", reg),
+                            owning_span: None,
+                        });
+                    });
+
+                    new_ops.push(AllocatedAbstractOp {
+                        opcode: Either::Left(AllocatedOpcode::CFSI(
+                            VirtualImmediate24::new(stack_use_bytes, Span::dummy()).unwrap(),
+                        )),
+                        comment: "recover space from saved registers".into(),
+                        owning_span: None,
+                    });
+                }
+
+                _otherwise => new_ops.push(op),
+            };
+            new_ops
+        });
+
+        self
+    }
+
+    /// Runs two passes -- one to get the instruction offsets of the labels
+    /// and one to replace the
labels in the organizational ops + pub(crate) fn realize_labels( + self, + data_section: &DataSection, + ) -> RealizedAbstractInstructionSet { + let mut label_namespace: HashMap<&Label, u64> = Default::default(); + let mut offset_map = vec![]; + let mut counter = 0; + for op in &self.ops { + offset_map.push(counter); + match op.opcode { + Either::Right(ControlFlowOp::Label(ref lab)) => { + label_namespace.insert(lab, counter); + } + // A special case for LWDataId which may be 1 or 2 ops, depending on the source size. + Either::Left(AllocatedOpcode::LWDataId(_, ref data_id)) => { + let has_copy_type = data_section.has_copy_type(data_id).expect( + "Internal miscalculation in data section -- \ + data id did not match up to any actual data", + ); + counter += if has_copy_type { 1 } else { 2 }; + } + // these ops will end up being exactly one op, so the counter goes up one + Either::Right(ControlFlowOp::Jump(..)) + | Either::Right(ControlFlowOp::JumpIfNotEq(..)) + | Either::Right(ControlFlowOp::JumpIfNotZero(..)) + | Either::Right(ControlFlowOp::Call(..)) + | Either::Right(ControlFlowOp::MoveAddress(..)) + | Either::Left(_) => { + counter += 1; + } + Either::Right(ControlFlowOp::Comment) => (), + Either::Right(ControlFlowOp::DataSectionOffsetPlaceholder) => { + // If the placeholder is 32 bits, this is 1. if 64, this should be 2. We use LW + // to load the data, which loads a whole word, so for now this is 2. + counter += 2 + } + + Either::Right(ControlFlowOp::PushAll(_)) + | Either::Right(ControlFlowOp::PopAll(_)) => unreachable!( + "fix me, pushall and popall don't really belong in control flow ops \ + since they're not about control flow" + ), + } + } + + let mut realized_ops = vec![]; + for ( + ix, + AllocatedAbstractOp { + opcode, + comment, + owning_span, + }, + ) in self.ops.clone().into_iter().enumerate() + { + let offset = offset_map[ix]; + match opcode { + Either::Left(op) => realized_ops.push(RealizedOp { + opcode: op, + owning_span, + comment, + offset, + }), + Either::Right(org_op) => match org_op { + ControlFlowOp::Jump(ref lab) | ControlFlowOp::Call(ref lab) => { + let imm = VirtualImmediate24::new_unchecked( + *label_namespace.get(lab).unwrap(), + "Programs with more than 2^24 labels are unsupported right now", + ); + realized_ops.push(RealizedOp { + opcode: AllocatedOpcode::JI(imm), + owning_span, + comment, + offset, + }); + } + ControlFlowOp::JumpIfNotEq(r1, r2, ref lab) => { + let imm = VirtualImmediate12::new_unchecked( + *label_namespace.get(lab).unwrap(), + "Programs with more than 2^12 labels are unsupported right now", + ); + realized_ops.push(RealizedOp { + opcode: AllocatedOpcode::JNEI(r1, r2, imm), + owning_span, + comment, + offset, + }); + } + ControlFlowOp::JumpIfNotZero(r1, ref lab) => { + let imm = VirtualImmediate18::new_unchecked( + *label_namespace.get(lab).unwrap(), + "Programs with more than 2^18 labels are unsupported right now", + ); + realized_ops.push(RealizedOp { + opcode: AllocatedOpcode::JNZI(r1, imm), + owning_span, + comment, + offset, + }); + } + ControlFlowOp::MoveAddress(r1, ref lab) => { + let imm = VirtualImmediate18::new_unchecked( + *label_namespace.get(lab).unwrap(), + "Programs with more than 2^18 labels are unsupported right now", + ); + realized_ops.push(RealizedOp { + opcode: AllocatedOpcode::MOVI(r1, imm), + owning_span, + comment, + offset, + }); + } + ControlFlowOp::DataSectionOffsetPlaceholder => { + realized_ops.push(RealizedOp { + opcode: AllocatedOpcode::DataSectionOffsetPlaceholder, + owning_span: None, + comment: String::new(), 
+                            offset,
+                        });
+                    }
+                    ControlFlowOp::Comment => continue,
+                    ControlFlowOp::Label(..) => continue,
+
+                    ControlFlowOp::PushAll(_) | ControlFlowOp::PopAll(_) => {
+                        unreachable!("still don't belong in organisational ops")
+                    }
+                },
+            };
+        }
+        RealizedAbstractInstructionSet { ops: realized_ops }
+    }
+}
+
+impl std::fmt::Display for AllocatedAbstractInstructionSet {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        write!(
+            f,
+            ".program:\n{}",
+            self.ops
+                .iter()
+                .map(|op| format!("{op}"))
+                .collect::<Vec<String>>()
+                .join("\n")
+        )
+    }
+}
diff --git a/sway-core/src/asm_generation/asm_builder.rs b/sway-core/src/asm_generation/asm_builder.rs
new file mode 100644
index 00000000000..cbdcf127203
--- /dev/null
+++ b/sway-core/src/asm_generation/asm_builder.rs
@@ -0,0 +1,1801 @@
+mod functions;
+
+use super::{
+    compiler_constants, from_ir::*, register_sequencer::RegisterSequencer, AbstractInstructionSet,
+    DataSection, Entry,
+};
+
+use crate::{
+    asm_lang::{virtual_register::*, Label, Op, VirtualImmediate12, VirtualOp},
+    error::*,
+    metadata::MetadataManager,
+    size_bytes_in_words, size_bytes_round_up_to_word_alignment,
+};
+
+use sway_ir::*;
+use sway_types::{span::Span, Spanned};
+
+use fuel_crypto::Hasher;
+
+use std::{collections::HashMap, sync::Arc};
+
+use either::Either;
+
+pub(super) struct AsmBuilder<'ir> {
+    // Data section is used by the rest of code gen to layout const memory.
+    data_section: DataSection,
+
+    // Register sequencer dishes out new registers and labels.
+    reg_seqr: RegisterSequencer,
+
+    // Label maps are from IR functions or blocks to label name. Functions have a start and end
+    // label.
+    func_label_map: HashMap<Function, (Label, Label)>,
+    block_label_map: HashMap<Block, Label>,
+
+    // Reg map is tracking IR values to VM values. Ptr map is tracking IR pointers to local
+    // storage types.
+    reg_map: HashMap<Value, VirtualRegister>,
+    ptr_map: HashMap<Pointer, Storage>,
+
+    // The currently compiled function has an end label which is at the end of the function body
+    // but before the call cleanup, and a copy of the $retv for when the return value is a reference
+    // type and must be copied in memory. Unless we have nested function declarations this vector
+    // will usually have 0 or 1 entry.
+    return_ctxs: Vec<(Label, VirtualRegister)>,
+
+    // Stack size and base register for locals.
+    locals_ctxs: Vec<(u64, VirtualRegister)>,
+
+    // IR context we're compiling.
+    context: &'ir Context,
+
+    // Metadata manager for converting metadata to Spans, etc.
+    md_mgr: MetadataManager,
+
+    // Final resulting VM bytecode ops; entry functions with their function and label, and regular
+    // non-entry functions.
+    entries: Vec<(Function, Label, Vec<Op>)>,
+    non_entries: Vec<Vec<Op>>,
+
+    // In progress VM bytecode ops.
+    cur_bytecode: Vec<Op>,
+}
+
+type AsmBuilderResult = (
+    DataSection,
+    RegisterSequencer,
+    Vec<(Function, Label, AbstractInstructionSet)>,
+    Vec<AbstractInstructionSet>,
+);
+
+impl<'ir> AsmBuilder<'ir> {
+    pub(super) fn new(
+        data_section: DataSection,
+        reg_seqr: RegisterSequencer,
+        context: &'ir Context,
+    ) -> Self {
+        AsmBuilder {
+            data_section,
+            reg_seqr,
+            func_label_map: HashMap::new(),
+            block_label_map: HashMap::new(),
+            reg_map: HashMap::new(),
+            ptr_map: HashMap::new(),
+            return_ctxs: Vec::new(),
+            locals_ctxs: Vec::new(),
+            context,
+            md_mgr: MetadataManager::default(),
+            entries: Vec::new(),
+            non_entries: Vec::new(),
+            cur_bytecode: Vec::new(),
+        }
+    }
+
+    // This is here temporarily, for the case when the IR can't provide a valid span, until we
+    // can improve ASM block parsing and verification. It's where it's needed the
It's where it's needed the + // most, for returning failure errors. If we move ASM verification to the parser and semantic + // analysis then ASM block conversion shouldn't/can't fail and we won't need to provide a + // guaranteed to be available span. + fn empty_span() -> Span { + let msg = "unknown source location"; + Span::new(Arc::from(msg), 0, msg.len(), None).unwrap() + } + + fn insert_block_label(&mut self, block: Block) { + if &block.get_label(self.context) != "entry" { + let label = self.block_to_label(&block); + self.cur_bytecode.push(Op::unowned_jump_label(label)) + } + } + + pub(super) fn finalize(self) -> AsmBuilderResult { + ( + self.data_section, + self.reg_seqr, + self.entries + .into_iter() + .map(|(f, l, ops)| (f, l, AbstractInstructionSet { ops })) + .collect(), + self.non_entries + .into_iter() + .map(|ops| AbstractInstructionSet { ops }) + .collect(), + ) + } + + fn compile_instruction( + &mut self, + block: &Block, + instr_val: &Value, + func_is_entry: bool, + ) -> CompileResult<()> { + let mut warnings = Vec::new(); + let mut errors = Vec::new(); + if let Some(instruction) = instr_val.get_instruction(self.context) { + match instruction { + Instruction::AddrOf(arg) => self.compile_addr_of(instr_val, arg), + Instruction::AsmBlock(asm, args) => { + check!( + self.compile_asm_block(instr_val, asm, args), + return err(warnings, errors), + warnings, + errors + ) + } + Instruction::BitCast(val, ty) => self.compile_bitcast(instr_val, val, ty), + Instruction::BinaryOp { op, arg1, arg2 } => { + self.compile_binary_op(instr_val, op, arg1, arg2) + } + Instruction::Branch(to_block) => self.compile_branch(block, to_block), + Instruction::Call(func, args) => self.compile_call(instr_val, func, args), + Instruction::Cmp(pred, lhs_value, rhs_value) => { + self.compile_cmp(instr_val, pred, lhs_value, rhs_value) + } + Instruction::ConditionalBranch { + cond_value, + true_block, + false_block, + } => self.compile_conditional_branch(cond_value, block, true_block, false_block), + Instruction::ContractCall { + params, + coins, + asset_id, + gas, + .. + } => self.compile_contract_call(instr_val, params, coins, asset_id, gas), + Instruction::ExtractElement { + array, + ty, + index_val, + } => self.compile_extract_element(instr_val, array, ty, index_val), + Instruction::ExtractValue { + aggregate, indices, .. + } => self.compile_extract_value(instr_val, aggregate, indices), + Instruction::GetStorageKey => { + check!( + self.compile_get_storage_key(instr_val), + return err(warnings, errors), + warnings, + errors + ) + } + Instruction::GetPointer { + base_ptr, + ptr_ty, + offset, + } => self.compile_get_pointer(instr_val, base_ptr, ptr_ty, *offset), + Instruction::Gtf { index, tx_field_id } => { + self.compile_gtf(instr_val, index, *tx_field_id) + } + Instruction::InsertElement { + array, + ty, + value, + index_val, + } => self.compile_insert_element(instr_val, array, ty, value, index_val), + Instruction::InsertValue { + aggregate, + value, + indices, + .. + } => self.compile_insert_value(instr_val, aggregate, value, indices), + Instruction::IntToPtr(val, _) => self.compile_int_to_ptr(instr_val, val), + Instruction::Load(src_val) => check!( + self.compile_load(instr_val, src_val), + return err(warnings, errors), + warnings, + errors + ), + Instruction::Log { + log_val, + log_ty, + log_id, + } => self.compile_log(instr_val, log_val, log_ty, log_id), + Instruction::Nop => (), + Instruction::Phi(_) => (), // Managing the phi value is done in br and cbr compilation. 
+                Instruction::ReadRegister(reg) => self.compile_read_register(instr_val, reg),
+                Instruction::Ret(ret_val, ty) => {
+                    if func_is_entry {
+                        self.compile_ret_from_entry(instr_val, ret_val, ty)
+                    } else {
+                        self.compile_ret_from_call(instr_val, ret_val, ty)
+                    }
+                }
+                Instruction::StateLoadQuadWord { load_val, key } => check!(
+                    self.compile_state_access_quad_word(
+                        instr_val,
+                        load_val,
+                        key,
+                        StateAccessType::Read
+                    ),
+                    return err(warnings, errors),
+                    warnings,
+                    errors
+                ),
+                Instruction::StateLoadWord(key) => check!(
+                    self.compile_state_load_word(instr_val, key),
+                    return err(warnings, errors),
+                    warnings,
+                    errors
+                ),
+                Instruction::StateStoreQuadWord { stored_val, key } => check!(
+                    self.compile_state_access_quad_word(
+                        instr_val,
+                        stored_val,
+                        key,
+                        StateAccessType::Write
+                    ),
+                    return err(warnings, errors),
+                    warnings,
+                    errors
+                ),
+                Instruction::StateStoreWord { stored_val, key } => check!(
+                    self.compile_state_store_word(instr_val, stored_val, key),
+                    return err(warnings, errors),
+                    warnings,
+                    errors
+                ),
+                Instruction::Store {
+                    dst_val,
+                    stored_val,
+                } => check!(
+                    self.compile_store(instr_val, dst_val, stored_val),
+                    return err(warnings, errors),
+                    warnings,
+                    errors
+                ),
+            }
+        } else {
+            errors.push(CompileError::Internal(
+                "Value not an instruction.",
+                self.md_mgr
+                    .val_to_span(self.context, *instr_val)
+                    .unwrap_or_else(Self::empty_span),
+            ));
+        }
+        ok((), warnings, errors)
+    }
+
+    // OK, I began by trying to translate the IR ASM block data structures back into AST data
+    // structures which I could feed to the code in asm_generation/expression/mod.rs where it
+    // compiles the inline ASM. But it's more work to do that than to just re-implement that
+    // algorithm with the IR data here.
+
+    fn compile_asm_block(
+        &mut self,
+        instr_val: &Value,
+        asm: &AsmBlock,
+        asm_args: &[AsmArg],
+    ) -> CompileResult<()> {
+        let mut warnings: Vec<CompileWarning> = Vec::new();
+        let mut errors: Vec<CompileError> = Vec::new();
+        let mut inline_reg_map = HashMap::new();
+        let mut inline_ops = Vec::new();
+        for AsmArg { name, initializer } in asm_args {
+            assert_or_warn!(
+                ConstantRegister::parse_register_name(name.as_str()).is_none(),
+                warnings,
+                name.span().clone(),
+                Warning::ShadowingReservedRegister {
+                    reg_name: name.clone()
+                }
+            );
+            let arg_reg = initializer
+                .map(|init_val| self.value_to_register(&init_val))
+                .unwrap_or_else(|| self.reg_seqr.next());
+            inline_reg_map.insert(name.as_str(), arg_reg);
+        }
+
+        let realize_register = |reg_name: &str| {
+            inline_reg_map.get(reg_name).cloned().or_else(|| {
+                ConstantRegister::parse_register_name(reg_name).map(VirtualRegister::Constant)
+            })
+        };
+
+        // For each opcode in the asm expression, attempt to parse it into an opcode and
+        // replace references to the above registers with the newly allocated ones.
+        let asm_block = asm.get_content(self.context);
+        for op in &asm_block.body {
+            let replaced_registers = op
+                .args
+                .iter()
+                .map(|reg_name| -> Result<_, CompileError> {
+                    realize_register(reg_name.as_str()).ok_or_else(|| {
+                        CompileError::UnknownRegister {
+                            span: reg_name.span(),
+                            initialized_registers: inline_reg_map
+                                .iter()
+                                .map(|(name, _)| *name)
+                                .collect::<Vec<_>>()
+                                .join("\n"),
+                        }
+                    })
+                })
+                .filter_map(|res| match res {
+                    Err(e) => {
+                        errors.push(e);
+                        None
+                    }
+                    Ok(o) => Some(o),
+                })
+                .collect::<Vec<VirtualRegister>>();

+            // Parse the actual op and registers.
+            let op_span = self
+                .md_mgr
+                .md_to_span(self.context, op.metadata)
+                .unwrap_or_else(Self::empty_span);
+            let opcode = check!(
+                Op::parse_opcode(
+                    &op.name,
+                    &replaced_registers,
+                    &op.immediate,
+                    op_span.clone(),
+                ),
+                return err(warnings, errors),
+                warnings,
+                errors
+            );
+
+            inline_ops.push(Op {
+                opcode: either::Either::Left(opcode),
+                comment: "asm block".into(),
+                owning_span: Some(op_span),
+            });
+        }
+
+        // Now, load the designated asm return register into the desired return register, but only
+        // if it was named.
+        if let Some(ret_reg_name) = &asm_block.return_name {
+            // Lookup and replace the return register.
+            let ret_reg = match realize_register(ret_reg_name.as_str()) {
+                Some(reg) => reg,
+                None => {
+                    errors.push(CompileError::UnknownRegister {
+                        initialized_registers: inline_reg_map
+                            .iter()
+                            .map(|(name, _)| name.to_string())
+                            .collect::<Vec<String>>()
+                            .join("\n"),
+                        span: ret_reg_name.span(),
+                    });
+                    return err(warnings, errors);
+                }
+            };
+            let instr_reg = self.reg_seqr.next();
+            inline_ops.push(Op {
+                opcode: Either::Left(VirtualOp::MOVE(instr_reg.clone(), ret_reg)),
+                comment: "return value from inline asm".into(),
+                owning_span: self.md_mgr.val_to_span(self.context, *instr_val),
+            });
+            self.reg_map.insert(*instr_val, instr_reg);
+        }
+
+        self.cur_bytecode.append(&mut inline_ops);
+
+        ok((), warnings, errors)
+    }
+
+    fn compile_addr_of(&mut self, instr_val: &Value, arg: &Value) {
+        let reg = self.value_to_register(arg);
+        self.reg_map.insert(*instr_val, reg);
+    }
+
+    fn compile_bitcast(&mut self, instr_val: &Value, bitcast_val: &Value, to_type: &Type) {
+        let val_reg = self.value_to_register(bitcast_val);
+        let reg = if let Type::Bool = to_type {
+            // This may not be necessary if we just treat a non-zero value as 'true'.
+            let res_reg = self.reg_seqr.next();
+            self.cur_bytecode.push(Op {
+                opcode: Either::Left(VirtualOp::EQ(
+                    res_reg.clone(),
+                    val_reg,
+                    VirtualRegister::Constant(ConstantRegister::Zero),
+                )),
+                comment: "convert to inverted boolean".into(),
+                owning_span: self.md_mgr.val_to_span(self.context, *instr_val),
+            });
+            self.cur_bytecode.push(Op {
+                opcode: Either::Left(VirtualOp::XORI(
+                    res_reg.clone(),
+                    res_reg.clone(),
+                    VirtualImmediate12 { value: 1 },
+                )),
+                comment: "invert boolean".into(),
+                owning_span: self.md_mgr.val_to_span(self.context, *instr_val),
+            });
+            res_reg
+        } else {
+            // This is a no-op, although strictly speaking Unit should probably be compiled as
+            // a zero.
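+            // For any other target type the register already holds the right bit
+            // pattern, so the bitcast can simply reuse the source register.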
+ val_reg + }; + self.reg_map.insert(*instr_val, reg); + } + + fn compile_binary_op( + &mut self, + instr_val: &Value, + op: &BinaryOpKind, + arg1: &Value, + arg2: &Value, + ) { + let val1_reg = self.value_to_register(arg1); + let val2_reg = self.value_to_register(arg2); + let res_reg = self.reg_seqr.next(); + let opcode = match op { + BinaryOpKind::Add => Either::Left(VirtualOp::ADD(res_reg.clone(), val1_reg, val2_reg)), + BinaryOpKind::Sub => Either::Left(VirtualOp::SUB(res_reg.clone(), val1_reg, val2_reg)), + BinaryOpKind::Mul => Either::Left(VirtualOp::MUL(res_reg.clone(), val1_reg, val2_reg)), + BinaryOpKind::Div => Either::Left(VirtualOp::DIV(res_reg.clone(), val1_reg, val2_reg)), + }; + self.cur_bytecode.push(Op { + opcode, + comment: String::new(), + owning_span: self.md_mgr.val_to_span(self.context, *instr_val), + }); + + self.reg_map.insert(*instr_val, res_reg); + } + + fn compile_branch(&mut self, from_block: &Block, to_block: &Block) { + self.compile_branch_to_phi_value(from_block, to_block); + + let label = self.block_to_label(to_block); + self.cur_bytecode.push(Op::jump_to_label(label)); + } + + fn compile_cmp( + &mut self, + instr_val: &Value, + pred: &Predicate, + lhs_value: &Value, + rhs_value: &Value, + ) { + let lhs_reg = self.value_to_register(lhs_value); + let rhs_reg = self.value_to_register(rhs_value); + let res_reg = self.reg_seqr.next(); + match pred { + Predicate::Equal => { + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::EQ(res_reg.clone(), lhs_reg, rhs_reg)), + comment: String::new(), + owning_span: self.md_mgr.val_to_span(self.context, *instr_val), + }); + } + } + self.reg_map.insert(*instr_val, res_reg); + } + + fn compile_conditional_branch( + &mut self, + cond_value: &Value, + from_block: &Block, + true_block: &Block, + false_block: &Block, + ) { + self.compile_branch_to_phi_value(from_block, true_block); + self.compile_branch_to_phi_value(from_block, false_block); + + let cond_reg = self.value_to_register(cond_value); + + let true_label = self.block_to_label(true_block); + self.cur_bytecode + .push(Op::jump_if_not_zero(cond_reg, true_label)); + + let false_label = self.block_to_label(false_block); + self.cur_bytecode.push(Op::jump_to_label(false_label)); + } + + fn compile_branch_to_phi_value(&mut self, from_block: &Block, to_block: &Block) { + if let Some(local_val) = to_block.get_phi_val_coming_from(self.context, from_block) { + // We only need a MOVE here if get_phi_val_coming_from() is actually assigned to a + // register + if let Some(local_reg) = self.opt_value_to_register(&local_val) { + let phi_reg = self.value_to_register(&to_block.get_phi(self.context)); + self.cur_bytecode.push(Op::register_move( + phi_reg, + local_reg, + "branch to phi value", + None, + )); + } + } + } + + #[allow(clippy::too_many_arguments)] + fn compile_contract_call( + &mut self, + instr_val: &Value, + params: &Value, + coins: &Value, + asset_id: &Value, + gas: &Value, + ) { + let ra_pointer = self.value_to_register(params); + let coins_register = self.value_to_register(coins); + let asset_id_register = self.value_to_register(asset_id); + let gas_register = self.value_to_register(gas); + + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::CALL( + ra_pointer, + coins_register, + asset_id_register, + gas_register, + )), + comment: "call external contract".into(), + owning_span: self.md_mgr.val_to_span(self.context, *instr_val), + }); + + // now, move the return value of the contract call to the return register. 
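+        // (After CALL completes, the constant $ret register holds the callee's
+        // return value, so it is copied into a fresh virtual register before a
+        // later call can clobber it.)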
+        // TODO validate RETL matches the expected type (this is a comment from the old codegen)
+        let instr_reg = self.reg_seqr.next();
+        self.cur_bytecode.push(Op::register_move(
+            instr_reg.clone(),
+            VirtualRegister::Constant(ConstantRegister::ReturnValue),
+            "save call result",
+            None,
+        ));
+        self.reg_map.insert(*instr_val, instr_reg);
+    }
+
+    fn compile_extract_element(
+        &mut self,
+        instr_val: &Value,
+        array: &Value,
+        ty: &Aggregate,
+        index_val: &Value,
+    ) {
+        // Base register should point to some stack allocated memory.
+        let base_reg = self.value_to_register(array);
+
+        // Index value is the array element index, not byte nor word offset.
+        let index_reg = self.value_to_register(index_val);
+        let rel_offset_reg = match index_reg {
+            VirtualRegister::Virtual(_) => {
+                // We can reuse the register.
+                index_reg.clone()
+            }
+            VirtualRegister::Constant(_) => {
+                // We have a constant register, cannot reuse it.
+                self.reg_seqr.next()
+            }
+        };
+
+        // We could put the OOB check here, though I'm now thinking it would be too wasteful.
+        // See compile_bounds_assertion() in expression/array.rs (or look in Git history).
+
+        let instr_reg = self.reg_seqr.next();
+        let owning_span = self.md_mgr.val_to_span(self.context, *instr_val);
+        let elem_type = ty.get_elem_type(self.context).unwrap();
+        let elem_size = ir_type_size_in_bytes(self.context, &elem_type);
+        if elem_type.is_copy_type() {
+            self.cur_bytecode.push(Op {
+                opcode: Either::Left(VirtualOp::MULI(
+                    rel_offset_reg.clone(),
+                    index_reg,
+                    VirtualImmediate12 { value: 8 },
+                )),
+                comment: "extract_element relative offset".into(),
+                owning_span: owning_span.clone(),
+            });
+            let elem_offs_reg = self.reg_seqr.next();
+            self.cur_bytecode.push(Op {
+                opcode: Either::Left(VirtualOp::ADD(
+                    elem_offs_reg.clone(),
+                    base_reg,
+                    rel_offset_reg,
+                )),
+                comment: "extract_element absolute offset".into(),
+                owning_span: owning_span.clone(),
+            });
+            self.cur_bytecode.push(Op {
+                opcode: Either::Left(VirtualOp::LW(
+                    instr_reg.clone(),
+                    elem_offs_reg,
+                    VirtualImmediate12 { value: 0 },
+                )),
+                comment: "extract_element".into(),
+                owning_span,
+            });
+        } else {
+            // Value too big for a register, so we return the memory offset.
+            if elem_size > compiler_constants::TWELVE_BITS {
+                let size_data_id = self
+                    .data_section
+                    .insert_data_value(Entry::new_word(elem_size, None));
+                let size_reg = self.reg_seqr.next();
+                self.cur_bytecode.push(Op {
+                    opcode: Either::Left(VirtualOp::LWDataId(size_reg.clone(), size_data_id)),
+                    owning_span: owning_span.clone(),
+                    comment: "loading element size for relative offset".into(),
+                });
+                self.cur_bytecode.push(Op {
+                    opcode: Either::Left(VirtualOp::MUL(instr_reg.clone(), index_reg, size_reg)),
+                    comment: "extract_element relative offset".into(),
+                    owning_span: owning_span.clone(),
+                });
+            } else {
+                self.cur_bytecode.push(Op {
+                    opcode: Either::Left(VirtualOp::MULI(
+                        instr_reg.clone(),
+                        index_reg,
+                        VirtualImmediate12 {
+                            value: elem_size as u16,
+                        },
+                    )),
+                    comment: "extract_element relative offset".into(),
+                    owning_span: owning_span.clone(),
+                });
+            }
+            self.cur_bytecode.push(Op {
+                opcode: Either::Left(VirtualOp::ADD(
+                    instr_reg.clone(),
+                    base_reg,
+                    instr_reg.clone(),
+                )),
+                comment: "extract_element absolute offset".into(),
+                owning_span,
+            });
+        }
+
+        self.reg_map.insert(*instr_val, instr_reg);
+    }
+
+    fn compile_extract_value(&mut self, instr_val: &Value, aggregate_val: &Value, indices: &[u64]) {
+        // Base register should point to some stack allocated memory.
+        let base_reg = self.value_to_register(aggregate_val);
+        let ((extract_offset, _), field_type) = aggregate_idcs_to_field_layout(
+            self.context,
+            &aggregate_val.get_stripped_ptr_type(self.context).unwrap(),
+            indices,
+        );
+
+        let instr_reg = self.reg_seqr.next();
+        let owning_span = self.md_mgr.val_to_span(self.context, *instr_val);
+        if field_type.is_copy_type() {
+            if extract_offset > compiler_constants::TWELVE_BITS {
+                let offset_reg = self.reg_seqr.next();
+                // LW's register operand is a byte address, so scale the word offset to bytes.
+                self.number_to_reg(extract_offset * 8, &offset_reg, owning_span.clone());
+                self.cur_bytecode.push(Op {
+                    opcode: Either::Left(VirtualOp::ADD(
+                        offset_reg.clone(),
+                        base_reg,
+                        offset_reg.clone(),
+                    )),
+                    comment: "add array base to offset".into(),
+                    owning_span: owning_span.clone(),
+                });
+                self.cur_bytecode.push(Op {
+                    opcode: Either::Left(VirtualOp::LW(
+                        instr_reg.clone(),
+                        offset_reg,
+                        VirtualImmediate12 { value: 0 },
+                    )),
+                    comment: format!(
+                        "extract_value @ {}",
+                        indices
+                            .iter()
+                            .map(|idx| format!("{}", idx))
+                            .collect::<Vec<String>>()
+                            .join(",")
+                    ),
+                    owning_span,
+                });
+            } else {
+                self.cur_bytecode.push(Op {
+                    opcode: Either::Left(VirtualOp::LW(
+                        instr_reg.clone(),
+                        base_reg,
+                        VirtualImmediate12 {
+                            value: extract_offset as u16,
+                        },
+                    )),
+                    comment: format!(
+                        "extract_value @ {}",
+                        indices
+                            .iter()
+                            .map(|idx| format!("{}", idx))
+                            .collect::<Vec<String>>()
+                            .join(",")
+                    ),
+                    owning_span,
+                });
+            }
+        } else {
+            // Value too big for a register, so we return the memory offset.
+            if extract_offset * 8 > compiler_constants::TWELVE_BITS {
+                let offset_reg = self.reg_seqr.next();
+                self.number_to_reg(extract_offset * 8, &offset_reg, owning_span.clone());
+                self.cur_bytecode.push(Op {
+                    opcode: either::Either::Left(VirtualOp::ADD(
+                        instr_reg.clone(),
+                        base_reg,
+                        offset_reg,
+                    )),
+                    comment: "extract address".into(),
+                    owning_span,
+                });
+            } else {
+                self.cur_bytecode.push(Op {
+                    opcode: either::Either::Left(VirtualOp::ADDI(
+                        instr_reg.clone(),
+                        base_reg,
+                        VirtualImmediate12 {
+                            value: (extract_offset * 8) as u16,
+                        },
+                    )),
+                    comment: "extract address".into(),
+                    owning_span,
+                });
+            }
+        }
+
+        self.reg_map.insert(*instr_val, instr_reg);
+    }
+
+    fn compile_get_storage_key(&mut self, instr_val: &Value) -> CompileResult<()> {
+        let warnings: Vec<CompileWarning> = Vec::new();
+        let mut errors: Vec<CompileError> = Vec::new();
+
+        let state_idx = self.md_mgr.val_to_storage_key(self.context, *instr_val);
+        let instr_span = self.md_mgr.val_to_span(self.context, *instr_val);
+
+        let storage_slot_to_hash = match state_idx {
+            Some(state_idx) => {
+                format!(
+                    "{}{}",
+                    sway_utils::constants::STORAGE_DOMAIN_SEPARATOR,
+                    state_idx
+                )
+            }
+            None => {
+                errors.push(CompileError::Internal(
+                    "State index for __get_storage_key is not available as metadata",
+                    instr_span.unwrap_or_else(Self::empty_span),
+                ));
+                return err(warnings, errors);
+            }
+        };
+
+        let hashed_storage_slot = Hasher::hash(storage_slot_to_hash);
+
+        let data_id = self
+            .data_section
+            .insert_data_value(Entry::new_byte_array((*hashed_storage_slot).to_vec(), None));
+
+        // Allocate a register for it and add a load instruction.
+        let reg = self.reg_seqr.next();
+
+        self.cur_bytecode.push(Op {
+            opcode: either::Either::Left(VirtualOp::LWDataId(reg.clone(), data_id)),
+            comment: "literal instantiation".into(),
+            owning_span: instr_span,
+        });
+        self.reg_map.insert(*instr_val, reg);
+        ok((), warnings, errors)
+    }
+
+    fn compile_get_pointer(
+        &mut self,
+        instr_val: &Value,
+        base_ptr: &Pointer,
+        ptr_ty: &Pointer,
+        offset: u64,
+    ) {
+        // `get_ptr` is like a `load` except the value isn't dereferenced.
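+        // So we only compute an address here: the locals base register plus the
+        // pointer's byte offset, plus `offset` more elements of the pointed-to type.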
+ let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); + match self.ptr_map.get(base_ptr) { + None => unimplemented!("BUG? Uninitialised pointer."), + Some(storage) => match storage.clone() { + Storage::Data(_data_id) => { + // Not sure if we'll ever need this. + unimplemented!("TODO get_ptr() into the data section."); + } + Storage::Stack(word_offs) => { + let ptr_ty_size_in_bytes = + ir_type_size_in_bytes(self.context, ptr_ty.get_type(self.context)); + + let offset_in_bytes = word_offs * 8 + ptr_ty_size_in_bytes * offset; + let instr_reg = self.reg_seqr.next(); + if offset_in_bytes > compiler_constants::TWELVE_BITS { + self.number_to_reg(offset_in_bytes, &instr_reg, owning_span.clone()); + self.cur_bytecode.push(Op { + opcode: either::Either::Left(VirtualOp::ADD( + instr_reg.clone(), + self.locals_base_reg().clone(), + instr_reg.clone(), + )), + comment: "get offset reg for get_ptr".into(), + owning_span, + }); + } else { + self.cur_bytecode.push(Op { + opcode: either::Either::Left(VirtualOp::ADDI( + instr_reg.clone(), + self.locals_base_reg().clone(), + VirtualImmediate12 { + value: (offset_in_bytes) as u16, + }, + )), + comment: "get offset reg for get_ptr".into(), + owning_span, + }); + } + self.reg_map.insert(*instr_val, instr_reg); + } + }, + } + } + + fn compile_gtf(&mut self, instr_val: &Value, index: &Value, tx_field_id: u64) { + let instr_reg = self.reg_seqr.next(); + let index_reg = self.value_to_register(index); + self.cur_bytecode.push(Op { + opcode: either::Either::Left(VirtualOp::GTF( + instr_reg.clone(), + index_reg, + VirtualImmediate12 { + value: tx_field_id as u16, + }, + )), + comment: "get transaction field".into(), + owning_span: self.md_mgr.val_to_span(self.context, *instr_val), + }); + self.reg_map.insert(*instr_val, instr_reg); + } + + fn compile_insert_element( + &mut self, + instr_val: &Value, + array: &Value, + ty: &Aggregate, + value: &Value, + index_val: &Value, + ) { + // Base register should point to some stack allocated memory. + let base_reg = self.value_to_register(array); + let insert_reg = self.value_to_register(value); + + // Index value is the array element index, not byte nor word offset. + let index_reg = self.value_to_register(index_val); + let rel_offset_reg = match index_reg { + VirtualRegister::Virtual(_) => { + // We can reuse the register. + index_reg.clone() + } + VirtualRegister::Constant(_) => { + // We have a constant register, cannot reuse it. + self.reg_seqr.next() + } + }; + + let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); + + let elem_type = ty.get_elem_type(self.context).unwrap(); + let elem_size = ir_type_size_in_bytes(self.context, &elem_type); + if elem_type.is_copy_type() { + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::MULI( + rel_offset_reg.clone(), + index_reg, + VirtualImmediate12 { value: 8 }, + )), + comment: "insert_element relative offset".into(), + owning_span: owning_span.clone(), + }); + let elem_offs_reg = self.reg_seqr.next(); + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::ADD( + elem_offs_reg.clone(), + base_reg.clone(), + rel_offset_reg, + )), + comment: "insert_element absolute offset".into(), + owning_span: owning_span.clone(), + }); + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::SW( + elem_offs_reg, + insert_reg, + VirtualImmediate12 { value: 0 }, + )), + comment: "insert_element".into(), + owning_span, + }); + } else { + // Element size is larger than 8; we switch to bytewise offsets and sizes and use MCP. 
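+            // (Strictly MCPI below, with the element size as an immediate; a size
+            // that doesn't fit in 12 bits would need MCP with a size register,
+            // hence the todo!.)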
+ if elem_size > compiler_constants::TWELVE_BITS { + todo!("array element size bigger than 4k") + } else { + let elem_index_offs_reg = self.reg_seqr.next(); + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::MULI( + elem_index_offs_reg.clone(), + index_reg, + VirtualImmediate12 { + value: elem_size as u16, + }, + )), + comment: "insert_element relative offset".into(), + owning_span: owning_span.clone(), + }); + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::ADD( + elem_index_offs_reg.clone(), + base_reg.clone(), + elem_index_offs_reg.clone(), + )), + comment: "insert_element absolute offset".into(), + owning_span: owning_span.clone(), + }); + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::MCPI( + elem_index_offs_reg, + insert_reg, + VirtualImmediate12 { + value: elem_size as u16, + }, + )), + comment: "insert_element store value".into(), + owning_span, + }); + } + } + + // We set the 'instruction' register to the base register, so that cascading inserts will + // work. + self.reg_map.insert(*instr_val, base_reg); + } + + fn compile_insert_value( + &mut self, + instr_val: &Value, + aggregate_val: &Value, + value: &Value, + indices: &[u64], + ) { + // Base register should point to some stack allocated memory. + let base_reg = self.value_to_register(aggregate_val); + + let insert_reg = self.value_to_register(value); + let ((mut insert_offs, field_size_in_bytes), field_type) = aggregate_idcs_to_field_layout( + self.context, + &aggregate_val.get_stripped_ptr_type(self.context).unwrap(), + indices, + ); + + let value_type = value.get_stripped_ptr_type(self.context).unwrap(); + let value_size_in_bytes = ir_type_size_in_bytes(self.context, &value_type); + let value_size_in_words = size_bytes_in_words!(value_size_in_bytes); + + // Account for the padding if the final field type is a union and the value we're trying to + // insert is smaller than the size of the union (i.e. we're inserting a small variant). 
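+        // (Union fields are right-justified within the union's size, so a small
+        // variant sits after `field_size_in_words - value_size_in_words` words of
+        // leading padding.)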
+        if matches!(field_type, Type::Union(_)) {
+            let field_size_in_words = size_bytes_in_words!(field_size_in_bytes);
+            assert!(field_size_in_words >= value_size_in_words);
+            insert_offs += field_size_in_words - value_size_in_words;
+        }
+
+        let indices_str = indices
+            .iter()
+            .map(|idx| format!("{}", idx))
+            .collect::<Vec<String>>()
+            .join(",");
+
+        let owning_span = self.md_mgr.val_to_span(self.context, *instr_val);
+
+        if value_type.is_copy_type() {
+            if insert_offs > compiler_constants::TWELVE_BITS {
+                let insert_offs_reg = self.reg_seqr.next();
+                // SW's register operand is a byte address, so scale the word offset to bytes,
+                // and keep base_reg intact since it's also the result for cascading inserts.
+                self.number_to_reg(insert_offs * 8, &insert_offs_reg, owning_span.clone());
+                self.cur_bytecode.push(Op {
+                    opcode: Either::Left(VirtualOp::ADD(
+                        insert_offs_reg.clone(),
+                        base_reg.clone(),
+                        insert_offs_reg.clone(),
+                    )),
+                    comment: "insert_value absolute offset".into(),
+                    owning_span: owning_span.clone(),
+                });
+                self.cur_bytecode.push(Op {
+                    opcode: Either::Left(VirtualOp::SW(
+                        insert_offs_reg,
+                        insert_reg,
+                        VirtualImmediate12 { value: 0 },
+                    )),
+                    comment: format!("insert_value @ {}", indices_str),
+                    owning_span,
+                });
+            } else {
+                self.cur_bytecode.push(Op {
+                    opcode: Either::Left(VirtualOp::SW(
+                        base_reg.clone(),
+                        insert_reg,
+                        VirtualImmediate12 {
+                            value: insert_offs as u16,
+                        },
+                    )),
+                    comment: format!("insert_value @ {}", indices_str),
+                    owning_span,
+                });
+            }
+        } else {
+            let offs_reg = self.reg_seqr.next();
+            if insert_offs * 8 > compiler_constants::TWELVE_BITS {
+                self.number_to_reg(insert_offs * 8, &offs_reg, owning_span.clone());
+                // Add the base to get the absolute field address.
+                self.cur_bytecode.push(Op {
+                    opcode: either::Either::Left(VirtualOp::ADD(
+                        offs_reg.clone(),
+                        base_reg.clone(),
+                        offs_reg.clone(),
+                    )),
+                    comment: format!("get struct field(s) {} offset", indices_str),
+                    owning_span: owning_span.clone(),
+                });
+            } else {
+                self.cur_bytecode.push(Op {
+                    opcode: either::Either::Left(VirtualOp::ADDI(
+                        offs_reg.clone(),
+                        base_reg.clone(),
+                        VirtualImmediate12 {
+                            value: (insert_offs * 8) as u16,
+                        },
+                    )),
+                    comment: format!("get struct field(s) {} offset", indices_str),
+                    owning_span: owning_span.clone(),
+                });
+            }
+            if value_size_in_bytes > compiler_constants::TWELVE_BITS {
+                let size_reg = self.reg_seqr.next();
+                self.number_to_reg(value_size_in_bytes, &size_reg, owning_span.clone());
+                self.cur_bytecode.push(Op {
+                    opcode: Either::Left(VirtualOp::MCP(offs_reg, insert_reg, size_reg)),
+                    comment: "store struct field value".into(),
+                    owning_span,
+                });
+            } else {
+                self.cur_bytecode.push(Op {
+                    opcode: Either::Left(VirtualOp::MCPI(
+                        offs_reg,
+                        insert_reg,
+                        VirtualImmediate12 {
+                            value: value_size_in_bytes as u16,
+                        },
+                    )),
+                    comment: "store struct field value".into(),
+                    owning_span,
+                });
+            }
+        }
+
+        // We set the 'instruction' register to the base register, so that cascading inserts will
+        // work.
+        self.reg_map.insert(*instr_val, base_reg);
+    }
+
+    fn compile_int_to_ptr(&mut self, instr_val: &Value, int_to_ptr_val: &Value) {
+        let val_reg = self.value_to_register(int_to_ptr_val);
+        self.reg_map.insert(*instr_val, val_reg);
+    }
+
+    fn compile_load(&mut self, instr_val: &Value, src_val: &Value) -> CompileResult<()> {
+        let ptr = self.resolve_ptr(src_val);
+        if ptr.value.is_none() {
+            return ptr.map(|_| ());
+        }
+        let (ptr, _ptr_ty, _offset) = ptr.value.unwrap();
+        let instr_reg = self.reg_seqr.next();
+        let owning_span = self.md_mgr.val_to_span(self.context, *instr_val);
+        match self.ptr_map.get(&ptr) {
+            None => unimplemented!("BUG?
Uninitialised pointer."), + Some(storage) => match storage.clone() { + Storage::Data(data_id) => { + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::LWDataId(instr_reg.clone(), data_id)), + comment: "load constant".into(), + owning_span, + }); + } + Storage::Stack(word_offs) => { + let base_reg = self.locals_base_reg().clone(); + if ptr.get_type(self.context).is_copy_type() { + // Value can fit in a register, so we load the value. + if word_offs > compiler_constants::TWELVE_BITS { + let offs_reg = self.reg_seqr.next(); + self.number_to_reg( + word_offs * 8, // Base reg for LW is in bytes + &offs_reg, + owning_span.clone(), + ); + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::ADD( + offs_reg.clone(), + base_reg, + offs_reg.clone(), + )), + comment: "absolute offset for load".into(), + owning_span: owning_span.clone(), + }); + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::LW( + instr_reg.clone(), + offs_reg.clone(), + VirtualImmediate12 { value: 0 }, + )), + comment: "load value".into(), + owning_span, + }); + } else { + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::LW( + instr_reg.clone(), + base_reg, + VirtualImmediate12 { + value: word_offs as u16, + }, + )), + comment: "load value".into(), + owning_span, + }); + } + } else { + // Value too big for a register, so we return the memory offset. This is + // what LW to the data section does, via LWDataId. + let word_offs = word_offs * 8; + if word_offs > compiler_constants::TWELVE_BITS { + let offs_reg = self.reg_seqr.next(); + self.number_to_reg(word_offs, &offs_reg, owning_span.clone()); + self.cur_bytecode.push(Op { + opcode: either::Either::Left(VirtualOp::ADD( + instr_reg.clone(), + base_reg, + offs_reg, + )), + comment: "load address".into(), + owning_span, + }); + } else { + self.cur_bytecode.push(Op { + opcode: either::Either::Left(VirtualOp::ADDI( + instr_reg.clone(), + base_reg, + VirtualImmediate12 { + value: word_offs as u16, + }, + )), + comment: "load address".into(), + owning_span, + }); + } + } + } + }, + } + self.reg_map.insert(*instr_val, instr_reg); + ok((), Vec::new(), Vec::new()) + } + + fn compile_log(&mut self, instr_val: &Value, log_val: &Value, log_ty: &Type, log_id: &Value) { + let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); + let log_val_reg = self.value_to_register(log_val); + let log_id_reg = self.value_to_register(log_id); + + if log_ty.is_copy_type() { + self.cur_bytecode.push(Op { + owning_span, + opcode: Either::Left(VirtualOp::LOG( + log_val_reg, + log_id_reg, + VirtualRegister::Constant(ConstantRegister::Zero), + VirtualRegister::Constant(ConstantRegister::Zero), + )), + comment: "".into(), + }); + } else { + // If the type not a reference type then we use LOGD to log the data. First put the + // size into the data section, then add a LW to get it, then add a LOGD which uses + // it. 
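+            // (LOGD logs `size_reg` bytes of memory starting at the address held
+            // in `log_val_reg`, tagged with the ID in `log_id_reg`.)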
+            let size_reg = self.reg_seqr.next();
+            let size_in_bytes = ir_type_size_in_bytes(self.context, log_ty);
+            let size_data_id = self
+                .data_section
+                .insert_data_value(Entry::new_word(size_in_bytes, None));
+
+            self.cur_bytecode.push(Op {
+                opcode: Either::Left(VirtualOp::LWDataId(size_reg.clone(), size_data_id)),
+                owning_span: owning_span.clone(),
+                comment: "loading size for LOGD".into(),
+            });
+            self.cur_bytecode.push(Op {
+                owning_span,
+                opcode: Either::Left(VirtualOp::LOGD(
+                    VirtualRegister::Constant(ConstantRegister::Zero),
+                    log_id_reg,
+                    log_val_reg,
+                    size_reg,
+                )),
+                comment: "".into(),
+            });
+        }
+    }
+
+    fn compile_read_register(&mut self, instr_val: &Value, reg: &sway_ir::Register) {
+        let instr_reg = self.reg_seqr.next();
+        self.cur_bytecode.push(Op {
+            opcode: Either::Left(VirtualOp::MOVE(
+                instr_reg.clone(),
+                VirtualRegister::Constant(match reg {
+                    sway_ir::Register::Of => ConstantRegister::Overflow,
+                    sway_ir::Register::Pc => ConstantRegister::ProgramCounter,
+                    sway_ir::Register::Ssp => ConstantRegister::StackStartPointer,
+                    sway_ir::Register::Sp => ConstantRegister::StackPointer,
+                    sway_ir::Register::Fp => ConstantRegister::FramePointer,
+                    sway_ir::Register::Hp => ConstantRegister::HeapPointer,
+                    sway_ir::Register::Error => ConstantRegister::Error,
+                    sway_ir::Register::Ggas => ConstantRegister::GlobalGas,
+                    sway_ir::Register::Cgas => ConstantRegister::ContextGas,
+                    sway_ir::Register::Bal => ConstantRegister::Balance,
+                    sway_ir::Register::Is => ConstantRegister::InstructionStart,
+                    sway_ir::Register::Ret => ConstantRegister::ReturnValue,
+                    sway_ir::Register::Retl => ConstantRegister::ReturnLength,
+                    sway_ir::Register::Flag => ConstantRegister::Flags,
+                }),
+            )),
+            comment: "move register into abi function".to_owned(),
+            owning_span: self.md_mgr.val_to_span(self.context, *instr_val),
+        });
+
+        self.reg_map.insert(*instr_val, instr_reg);
+    }
+
+    fn compile_ret_from_entry(&mut self, instr_val: &Value, ret_val: &Value, ret_type: &Type) {
+        let owning_span = self.md_mgr.val_to_span(self.context, *instr_val);
+        if ret_type.eq(self.context, &Type::Unit) {
+            // Unit returns should always be zero, although because they can be omitted from
+            // functions, the register is sometimes uninitialized. Manually return zero in this
+            // case.
+            self.cur_bytecode.push(Op {
+                opcode: Either::Left(VirtualOp::RET(VirtualRegister::Constant(
+                    ConstantRegister::Zero,
+                ))),
+                owning_span,
+                comment: "returning unit as zero".into(),
+            });
+        } else {
+            let ret_reg = self.value_to_register(ret_val);
+
+            if ret_type.is_copy_type() {
+                self.cur_bytecode.push(Op {
+                    owning_span,
+                    opcode: Either::Left(VirtualOp::RET(ret_reg)),
+                    comment: "".into(),
+                });
+            } else {
+                // If the type is a reference type then we use RETD to return data. First put the
+                // size into the data section, then add a LW to get it, then add a RETD which uses
+                // it.
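+                // (RETD returns `size_reg` bytes of memory starting at the address
+                // held in `ret_reg`, rather than the register value itself.)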
+                let size_reg = self.reg_seqr.next();
+                let size_in_bytes = ir_type_size_in_bytes(self.context, ret_type);
+                let size_data_id = self
+                    .data_section
+                    .insert_data_value(Entry::new_word(size_in_bytes, None));
+
+                self.cur_bytecode.push(Op {
+                    opcode: Either::Left(VirtualOp::LWDataId(size_reg.clone(), size_data_id)),
+                    owning_span: owning_span.clone(),
+                    comment: "loading size for RETD".into(),
+                });
+                self.cur_bytecode.push(Op {
+                    owning_span,
+                    opcode: Either::Left(VirtualOp::RETD(ret_reg, size_reg)),
+                    comment: "".into(),
+                });
+            }
+        }
+    }
+
+    fn offset_reg(
+        &mut self,
+        base_reg: &VirtualRegister,
+        offset_in_bytes: u64,
+        span: Option<Span>,
+    ) -> VirtualRegister {
+        let offset_reg = self.reg_seqr.next();
+        if offset_in_bytes > compiler_constants::TWELVE_BITS {
+            let offs_reg = self.reg_seqr.next();
+            self.number_to_reg(offset_in_bytes, &offs_reg, span.clone());
+            self.cur_bytecode.push(Op {
+                opcode: either::Either::Left(VirtualOp::ADD(
+                    offset_reg.clone(),
+                    base_reg.clone(),
+                    offs_reg,
+                )),
+                comment: "get offset".into(),
+                owning_span: span,
+            });
+        } else {
+            self.cur_bytecode.push(Op {
+                opcode: either::Either::Left(VirtualOp::ADDI(
+                    offset_reg.clone(),
+                    base_reg.clone(),
+                    VirtualImmediate12 {
+                        value: offset_in_bytes as u16,
+                    },
+                )),
+                comment: "get offset".into(),
+                owning_span: span,
+            });
+        }
+
+        offset_reg
+    }
+
+    fn compile_state_access_quad_word(
+        &mut self,
+        instr_val: &Value,
+        val: &Value,
+        key: &Value,
+        access_type: StateAccessType,
+    ) -> CompileResult<()> {
+        // Make sure that both val and key are pointers to B256.
+        assert!(matches!(
+            val.get_stripped_ptr_type(self.context),
+            Some(Type::B256)
+        ));
+        assert!(matches!(
+            key.get_stripped_ptr_type(self.context),
+            Some(Type::B256)
+        ));
+        let owning_span = self.md_mgr.val_to_span(self.context, *instr_val);
+
+        let key_ptr = self.resolve_ptr(key);
+        if key_ptr.value.is_none() {
+            return key_ptr.map(|_| ());
+        }
+        let (key_ptr, ptr_ty, offset) = key_ptr.value.unwrap();
+
+        // Not expecting an offset here nor a pointer cast
+        assert!(offset == 0);
+        assert!(ptr_ty.get_type(self.context).eq(self.context, &Type::B256));
+
+        let val_reg = if matches!(
+            val.get_instruction(self.context),
+            Some(Instruction::IntToPtr(..))
+        ) {
+            match self.reg_map.get(val) {
+                Some(vreg) => vreg.clone(),
+                None => unreachable!("int_to_ptr instruction doesn't have vreg mapped"),
+            }
+        } else {
+            // Expect ptr_ty here to also be b256 and offset to be whatever...
+            let val_ptr = self.resolve_ptr(val);
+            if val_ptr.value.is_none() {
+                return val_ptr.map(|_| ());
+            }
+            let (val_ptr, ptr_ty, offset) = val_ptr.value.unwrap();
+            // Expect the ptr_ty for val to also be B256
+            assert!(ptr_ty.get_type(self.context).eq(self.context, &Type::B256));
+            match self.ptr_map.get(&val_ptr) {
+                Some(Storage::Stack(val_offset)) => {
+                    let base_reg = self.locals_base_reg().clone();
+                    let val_offset_in_bytes = val_offset * 8 + offset * 32;
+                    self.offset_reg(&base_reg, val_offset_in_bytes, owning_span.clone())
+                }
+                _ => unreachable!("Unexpected storage locations for key and val"),
+            }
+        };
+
+        let key_reg = match self.ptr_map.get(&key_ptr) {
+            Some(Storage::Stack(key_offset)) => {
+                let base_reg = self.locals_base_reg().clone();
+                let key_offset_in_bytes = key_offset * 8;
+                self.offset_reg(&base_reg, key_offset_in_bytes, owning_span.clone())
+            }
+            _ => unreachable!("Unexpected storage locations for key and val"),
+        };
+
+        self.cur_bytecode.push(Op {
+            opcode: Either::Left(match access_type {
+                StateAccessType::Read => VirtualOp::SRWQ(val_reg, key_reg),
+                StateAccessType::Write => VirtualOp::SWWQ(key_reg, val_reg),
+            }),
+            comment: "quad word state access".into(),
+            owning_span,
+        });
+        ok((), Vec::new(), Vec::new())
+    }
+
+    fn compile_state_load_word(&mut self, instr_val: &Value, key: &Value) -> CompileResult<()> {
+        // Make sure that the key is a pointer to B256.
+        assert!(matches!(
+            key.get_stripped_ptr_type(self.context),
+            Some(Type::B256)
+        ));
+
+        let key_ptr = self.resolve_ptr(key);
+        if key_ptr.value.is_none() {
+            return key_ptr.map(|_| ());
+        }
+        let (key_ptr, ptr_ty, offset) = key_ptr.value.unwrap();
+
+        // Not expecting an offset here nor a pointer cast
+        assert!(offset == 0);
+        assert!(ptr_ty.get_type(self.context).eq(self.context, &Type::B256));
+
+        let load_reg = self.reg_seqr.next();
+        let owning_span = self.md_mgr.val_to_span(self.context, *instr_val);
+        match self.ptr_map.get(&key_ptr) {
+            Some(Storage::Stack(key_offset)) => {
+                let base_reg = self.locals_base_reg().clone();
+                let key_offset_in_bytes = key_offset * 8;
+
+                let key_reg = self.offset_reg(&base_reg, key_offset_in_bytes, owning_span.clone());
+
+                self.cur_bytecode.push(Op {
+                    opcode: Either::Left(VirtualOp::SRW(load_reg.clone(), key_reg)),
+                    comment: "single word state access".into(),
+                    owning_span,
+                });
+            }
+            _ => unreachable!("Unexpected storage location for key"),
+        }
+
+        self.reg_map.insert(*instr_val, load_reg);
+        ok((), Vec::new(), Vec::new())
+    }
+
+    fn compile_state_store_word(
+        &mut self,
+        instr_val: &Value,
+        store_val: &Value,
+        key: &Value,
+    ) -> CompileResult<()> {
+        // Make sure that key is a pointer to B256.
+        assert!(matches!(
+            key.get_stripped_ptr_type(self.context),
+            Some(Type::B256)
+        ));
+
+        // Make sure that store_val is a U64 value.
+        assert!(matches!(
+            store_val.get_type(self.context),
+            Some(Type::Uint(64))
+        ));
+        let store_reg = self.value_to_register(store_val);
+
+        // Expect the get_ptr here to have type b256 and offset = 0???
+ let key_ptr = self.resolve_ptr(key); + if key_ptr.value.is_none() { + return key_ptr.map(|_| ()); + } + let (key_ptr, ptr_ty, offset) = key_ptr.value.unwrap(); + + // Not expecting an offset here nor a pointer cast + assert!(offset == 0); + assert!(ptr_ty.get_type(self.context).eq(self.context, &Type::B256)); + + let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); + match self.ptr_map.get(&key_ptr) { + Some(Storage::Stack(key_offset)) => { + let base_reg = self.locals_base_reg().clone(); + let key_offset_in_bytes = key_offset * 8; + + let key_reg = self.offset_reg(&base_reg, key_offset_in_bytes, owning_span.clone()); + + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::SWW(key_reg, store_reg)), + comment: "single word state access".into(), + owning_span, + }); + } + _ => unreachable!("Unexpected storage locations for key and store_val"), + } + + ok((), Vec::new(), Vec::new()) + } + + fn compile_store( + &mut self, + instr_val: &Value, + dst_val: &Value, + stored_val: &Value, + ) -> CompileResult<()> { + let ptr = self.resolve_ptr(dst_val); + if ptr.value.is_none() { + return ptr.map(|_| ()); + } + let (ptr, _ptr_ty, _offset) = ptr.value.unwrap(); + let stored_reg = self.value_to_register(stored_val); + let is_aggregate_ptr = ptr.is_aggregate_ptr(self.context); + let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); + match self.ptr_map.get(&ptr) { + None => unreachable!("Bug! Trying to store to an unknown pointer."), + Some(storage) => match storage { + Storage::Data(_) => unreachable!("BUG! Trying to store to the data section."), + Storage::Stack(word_offs) => { + let word_offs = *word_offs; + let store_type = ptr.get_type(self.context); + let store_size_in_words = + size_bytes_in_words!(ir_type_size_in_bytes(self.context, store_type)); + if store_type.is_copy_type() { + let base_reg = self.locals_base_reg().clone(); + + // A single word can be stored with SW. + let stored_reg = if !is_aggregate_ptr { + // stored_reg is a value. + stored_reg + } else { + // stored_reg is a pointer, even though size is 1. We need to load it. + let tmp_reg = self.reg_seqr.next(); + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::LW( + tmp_reg.clone(), + stored_reg, + VirtualImmediate12 { value: 0 }, + )), + comment: "load for store".into(), + owning_span: owning_span.clone(), + }); + tmp_reg + }; + if word_offs > compiler_constants::TWELVE_BITS { + let offs_reg = self.reg_seqr.next(); + self.number_to_reg( + word_offs * 8, // Base reg for SW is in bytes + &offs_reg, + owning_span.clone(), + ); + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::ADD( + offs_reg.clone(), + base_reg, + offs_reg.clone(), + )), + comment: "store absolute offset".into(), + owning_span: owning_span.clone(), + }); + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::SW( + offs_reg, + stored_reg, + VirtualImmediate12 { value: 0 }, + )), + comment: "store value".into(), + owning_span, + }); + } else { + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::SW( + base_reg, + stored_reg, + VirtualImmediate12 { + value: word_offs as u16, + }, + )), + comment: "store value".into(), + owning_span, + }); + } + } else { + let base_reg = self.locals_base_reg().clone(); + + // Bigger than 1 word needs a MCPI. XXX Or MCP if it's huge. 
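+                    // A sketch of the copy emitted below, with placeholder registers;
+                    // the immediate forms fall back to ADD/MCP via a temporary when
+                    // the offset or size doesn't fit in twelve bits:
+                    //
+                    //     ADDI $dst, $locals_base, dst_offset_in_bytes
+                    //     MCPI $dst, $stored, size_in_bytes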
+                    let dest_offs_reg = self.reg_seqr.next();
+                    if word_offs * 8 > compiler_constants::TWELVE_BITS {
+                        self.number_to_reg(word_offs * 8, &dest_offs_reg, owning_span.clone());
+                        self.cur_bytecode.push(Op {
+                            opcode: either::Either::Left(VirtualOp::ADD(
+                                dest_offs_reg.clone(),
+                                base_reg,
+                                dest_offs_reg.clone(),
+                            )),
+                            comment: "get store offset".into(),
+                            owning_span: owning_span.clone(),
+                        });
+                    } else {
+                        self.cur_bytecode.push(Op {
+                            opcode: either::Either::Left(VirtualOp::ADDI(
+                                dest_offs_reg.clone(),
+                                base_reg,
+                                VirtualImmediate12 {
+                                    value: (word_offs * 8) as u16,
+                                },
+                            )),
+                            comment: "get store offset".into(),
+                            owning_span: owning_span.clone(),
+                        });
+                    }
+
+                    if store_size_in_words * 8 > compiler_constants::TWELVE_BITS {
+                        let size_reg = self.reg_seqr.next();
+                        self.number_to_reg(
+                            store_size_in_words * 8,
+                            &size_reg,
+                            owning_span.clone(),
+                        );
+                        self.cur_bytecode.push(Op {
+                            opcode: Either::Left(VirtualOp::MCP(
+                                dest_offs_reg,
+                                stored_reg,
+                                size_reg,
+                            )),
+                            comment: "store value".into(),
+                            owning_span,
+                        });
+                    } else {
+                        self.cur_bytecode.push(Op {
+                            opcode: Either::Left(VirtualOp::MCPI(
+                                dest_offs_reg,
+                                stored_reg,
+                                VirtualImmediate12 {
+                                    value: (store_size_in_words * 8) as u16,
+                                },
+                            )),
+                            comment: "store value".into(),
+                            owning_span,
+                        });
+                    }
+                }
+            }
+        },
+        };
+        ok((), Vec::new(), Vec::new())
+    }
+
+    fn resolve_ptr(&mut self, ptr_val: &Value) -> CompileResult<(Pointer, Pointer, u64)> {
+        match ptr_val.get_instruction(self.context) {
+            Some(Instruction::GetPointer {
+                base_ptr,
+                ptr_ty,
+                offset,
+            }) => ok((*base_ptr, *ptr_ty, *offset), Vec::new(), Vec::new()),
+            _otherwise => err(
+                Vec::new(),
+                vec![CompileError::Internal(
+                    "Pointer arg for load/store is not a get_ptr instruction.",
+                    self.md_mgr
+                        .val_to_span(self.context, *ptr_val)
+                        .unwrap_or_else(Self::empty_span),
+                )],
+            ),
+        }
+    }
+
+    fn initialise_constant(&mut self, constant: &Constant, span: Option<Span>) -> VirtualRegister {
+        match &constant.value {
+            // Use cheaper $zero or $one registers if possible.
+            ConstantValue::Unit | ConstantValue::Bool(false) | ConstantValue::Uint(0) => {
+                VirtualRegister::Constant(ConstantRegister::Zero)
+            }
+
+            ConstantValue::Bool(true) | ConstantValue::Uint(1) => {
+                VirtualRegister::Constant(ConstantRegister::One)
+            }
+
+            _otherwise => {
+                // Get the constant into the namespace.
+                let entry = Entry::from_constant(self.context, constant);
+                let data_id = self.data_section.insert_data_value(entry);
+
+                // Allocate a register for it, and a load instruction.
+                let reg = self.reg_seqr.next();
+                self.cur_bytecode.push(Op {
+                    opcode: either::Either::Left(VirtualOp::LWDataId(reg.clone(), data_id)),
+                    comment: "literal instantiation".into(),
+                    owning_span: span,
+                });
+                reg
+            }
+        }
+
+        // Insert the value into the map.
+        //self.reg_map.insert(*value, reg.clone());
+        //
+        // Actually, no, don't. It's possible for constant values to be
+        // reused in the IR, especially with transforms which copy blocks
+        // around, like inlining. The `LW`/`LWDataId` instruction above
+        // initialises that constant value but it may be in a conditional
+        // block and not actually get evaluated for every possible
+        // execution. So using the register later on by pulling it from
+        // `self.reg_map` will have a potentially uninitialised register.
+        //
+        // By not putting it in the map we recreate the `LW` each time it's
+        // used, which also isn't ideal.
A better solution is to put this
+        // initialisation into the IR itself, and allow for analysis there
+        // to determine when it may be initialised and/or reused.
+    }
+
+    // Get the reg corresponding to `value`. Returns None if the value is not in reg_map or is not
+    // a constant.
+    fn opt_value_to_register(&mut self, value: &Value) -> Option<VirtualRegister> {
+        self.reg_map.get(value).cloned().or_else(|| {
+            value.get_constant(self.context).map(|constant| {
+                let span = self.md_mgr.val_to_span(self.context, *value);
+                self.initialise_constant(constant, span)
+            })
+        })
+    }
+
+    // Same as `opt_value_to_register` but returns a new register if no register is found and
+    // `value` is not a constant.
+    fn value_to_register(&mut self, value: &Value) -> VirtualRegister {
+        match self.opt_value_to_register(value) {
+            Some(reg) => reg,
+            None => {
+                // Just make a new register for this value.
+                let reg = self.reg_seqr.next();
+                self.reg_map.insert(*value, reg.clone());
+                reg
+            }
+        }
+    }
+
+    fn number_to_reg(&mut self, offset: u64, offset_reg: &VirtualRegister, span: Option<Span>) {
+        if offset > compiler_constants::TWENTY_FOUR_BITS {
+            todo!("Absolutely giant arrays.");
+        }
+
+        // Use bitwise ORs and SHIFTs to create a 24-bit value in a register.
+        self.cur_bytecode.push(Op {
+            opcode: either::Either::Left(VirtualOp::ORI(
+                offset_reg.clone(),
+                VirtualRegister::Constant(ConstantRegister::Zero),
+                VirtualImmediate12 {
+                    value: (offset >> 12) as u16,
+                },
+            )),
+            comment: "get extract offset high bits".into(),
+            owning_span: span.clone(),
+        });
+        self.cur_bytecode.push(Op {
+            opcode: either::Either::Left(VirtualOp::SLLI(
+                offset_reg.clone(),
+                offset_reg.clone(),
+                VirtualImmediate12 { value: 12 },
+            )),
+            comment: "shift extract offset high bits".into(),
+            owning_span: span.clone(),
+        });
+        self.cur_bytecode.push(Op {
+            opcode: either::Either::Left(VirtualOp::ORI(
+                offset_reg.clone(),
+                offset_reg.clone(),
+                VirtualImmediate12 {
+                    value: (offset & 0xfff) as u16,
+                },
+            )),
+            comment: "get extract offset low bits".into(),
+            owning_span: span,
+        });
+    }
+
+    pub(super) fn func_to_labels(&mut self, func: &Function) -> (Label, Label) {
+        self.func_label_map.get(func).cloned().unwrap_or_else(|| {
+            let labels = (self.reg_seqr.get_label(), self.reg_seqr.get_label());
+            self.func_label_map.insert(*func, labels);
+            labels
+        })
+    }
+
+    fn block_to_label(&mut self, block: &Block) -> Label {
+        self.block_label_map.get(block).cloned().unwrap_or_else(|| {
+            let label = self.reg_seqr.get_label();
+            self.block_label_map.insert(*block, label);
+            label
+        })
+    }
+}
diff --git a/sway-core/src/asm_generation/asm_builder/functions.rs b/sway-core/src/asm_generation/asm_builder/functions.rs
new file mode 100644
index 00000000000..d9a9402a711
--- /dev/null
+++ b/sway-core/src/asm_generation/asm_builder/functions.rs
@@ -0,0 +1,544 @@
+use super::{
+    compiler_constants, ir_type_size_in_bytes, size_bytes_in_words,
+    size_bytes_round_up_to_word_alignment, AsmBuilder,
+};
+
+use crate::{
+    asm_generation::{from_ir::*, Entry},
+    asm_lang::{
+        virtual_register::*, Op, OrganizationalOp, VirtualImmediate12, VirtualImmediate24,
+        VirtualOp,
+    },
+    error::*,
+};
+
+use sway_ir::*;
+
+use fuel_asm::GTFArgs;
+
+use either::Either;
+
+/// A summary of the adopted calling convention:
+///
+/// - Function arguments are passed left to right in the reserved registers. Extra args are passed
+///   on the stack.
+/// - The return value is returned in $retv.
+/// - The return address is passed in $reta.
+/// - All other general purpose registers must be preserved. +/// +/// If the return value has a copy-type it can be returned in $retv directly. If the return +/// value is a ref-type its space must be allocated by the caller and its address passed into +/// (and out of) the callee using $retv. +/// +/// The general process for a call is therefore the following. Not all steps are necessary, +/// depending on how many args and local variables the callee has, and whether the callee makes +/// its own calls. +/// +/// - Caller: +/// - Place function args into $rarg0 - $rargN and if necessary the stack. +/// - Allocate the return value on the stack if it's a reference type. +/// - Place the return address into $reta +/// - Jump to function address. +/// - If necessary restore the stack to free args. +/// - Callee: +/// - Save general purpose registers to the stack. +/// - Save the args registers, return value pointer and return address. +/// - Save room on the stack for locals. +/// - (Do work.) +/// - Put the result in return value. +/// - Restore the stack to free locals. +/// - Restore the return address. +/// - Restore the general purpose registers from the stack. +/// - Jump to the return address. + +impl<'ir> AsmBuilder<'ir> { + pub(super) fn compile_call(&mut self, instr_val: &Value, function: &Function, args: &[Value]) { + if !function.get_return_type(self.context).is_copy_type() { + // To implement non-copy type return values we will transform functions to return their + // value via an 'out' argument, either during IR generation or possibly with an IR + // transformation. + // + // This hasn't been done yet and will be addressed in a future change. Until then we + // will enforce functions returning non-copy type values are always inlined, and so + // we will not see them at this stage of the compiler. + unimplemented!( + "Can't do reference type return values yet (and should've been inlined). {}", + function.get_name(self.context) + ) + } + + // Put the args into the args registers. + for (idx, arg_val) in args.iter().enumerate() { + if idx < compiler_constants::NUM_ARG_REGISTERS as usize { + let arg_reg = self.value_to_register(arg_val); + self.cur_bytecode.push(Op::register_move( + VirtualRegister::Constant(ConstantRegister::ARG_REGS[idx]), + arg_reg, + format!("pass arg {idx}"), + self.md_mgr.val_to_span(self.context, *arg_val), + )); + } else { + todo!( + "can't do more than {} args yet", + compiler_constants::NUM_ARG_REGISTERS + ); + } + } + + // Set a new return address. + let ret_label = self.reg_seqr.get_label(); + self.cur_bytecode.push(Op::move_address( + VirtualRegister::Constant(ConstantRegister::CallReturnAddress), + ret_label, + "set new return addr", + None, + )); + + // Jump to function and insert return label. + let (fn_label, _) = self.func_to_labels(function); + self.cur_bytecode.push(Op { + opcode: Either::Right(OrganizationalOp::Call(fn_label)), + comment: format!("call {}", function.get_name(self.context)), + owning_span: None, + }); + self.cur_bytecode.push(Op::unowned_jump_label(ret_label)); + + // Save the return value. 
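+        // Altogether, a call site looks roughly like this sketch (placeholder
+        // names; the organizational call op is resolved to a real jump later):
+        //
+        //     MOVE $rarg0, <arg0>       ; pass args
+        //     MOVE $reta, ret_label     ; set the return address
+        //     call fn_label             ; jump to the function
+        // ret_label:
+        //     MOVE <result>, $retv      ; copy the return value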
+        let ret_reg = self.reg_seqr.next();
+        self.cur_bytecode.push(Op {
+            opcode: Either::Left(VirtualOp::MOVE(
+                ret_reg.clone(),
+                VirtualRegister::Constant(ConstantRegister::CallReturnValue),
+            )),
+            comment: "copy the return value".into(),
+            owning_span: None,
+        });
+        self.reg_map.insert(*instr_val, ret_reg);
+    }
+
+    pub(super) fn compile_ret_from_call(
+        &mut self,
+        instr_val: &Value,
+        ret_val: &Value,
+        ret_type: &Type,
+    ) {
+        if !ret_type.is_copy_type() {
+            // See above in compile_call().
+            unimplemented!("Can't do reference type return values yet. {ret_type:?}")
+        }
+
+        // Move the result into the return value register.
+        let owning_span = self.md_mgr.val_to_span(self.context, *instr_val);
+        let ret_reg = self.value_to_register(ret_val);
+        self.cur_bytecode.push(Op::register_move(
+            VirtualRegister::Constant(ConstantRegister::CallReturnValue),
+            ret_reg,
+            "set return value",
+            owning_span,
+        ));
+
+        // Jump to the end of the function.
+        let end_label = self
+            .return_ctxs
+            .last()
+            .expect("Calls guaranteed to save return context.")
+            .0;
+        self.cur_bytecode.push(Op::jump_to_label(end_label));
+    }
+
+    pub(crate) fn compile_function(&mut self, function: Function) -> CompileResult<()> {
+        assert!(
+            self.cur_bytecode.is_empty(),
+            "can't do nested functions yet"
+        );
+
+        if function.has_selector(self.context) {
+            // Add a comment noting that this is a named contract method.
+            self.cur_bytecode.push(Op::new_comment(format!(
+                "contract method: {}, selector: 0x{}",
+                function.get_name(self.context),
+                function
+                    .get_selector(self.context)
+                    .unwrap()
+                    .into_iter()
+                    .map(|b| format!("{b:02x}"))
+                    .collect::<String>()
+            )));
+        }
+
+        let func_has_selector = function.has_selector(self.context);
+        let func_is_main = function.get_name(self.context) == "main";
+        let func_is_entry = func_has_selector || func_is_main;
+
+        // Insert a function label.
+        let (start_label, end_label) = self.func_to_labels(&function);
+        let md = function.get_metadata(self.context);
+        let span = self.md_mgr.md_to_span(self.context, md);
+        let comment = format!(
+            "--- start of function: {} ---",
+            function.get_name(self.context)
+        );
+        self.cur_bytecode.push(match span {
+            Some(span) => Op::jump_label_comment(start_label, span, comment),
+            None => Op::unowned_jump_label_comment(start_label, comment),
+        });
+
+        // Manage the call frame.
+        if !func_is_entry {
+            // Save any general purpose registers used here on the stack.
+            self.cur_bytecode.push(Op {
+                opcode: Either::Right(OrganizationalOp::PushAll(start_label)),
+                comment: "save all regs".to_owned(),
+                owning_span: None,
+            });
+        }
+
+        if func_is_entry {
+            // Read the args from VM/transaction memory.
+            self.compile_external_args(function, func_is_main)
+        } else {
+            // Make copies of the arg registers.
+            self.compile_fn_call_args(function)
+        }
+
+        let reta = self.reg_seqr.next(); // XXX only do this if this function makes calls
+        if !func_is_entry {
+            // Save $reta and $retv
+            self.cur_bytecode.push(Op::register_move(
+                reta.clone(),
+                VirtualRegister::Constant(ConstantRegister::CallReturnAddress),
+                "save reta",
+                None,
+            ));
+            let retv = self.reg_seqr.next();
+            self.cur_bytecode.push(Op::register_move(
+                retv.clone(),
+                VirtualRegister::Constant(ConstantRegister::CallReturnValue),
+                "save retv",
+                None,
+            ));
+
+            // Store some info describing the call frame.
+            self.return_ctxs.push((end_label, retv));
+        }
+
+        self.init_locals(function);
+
+        // Compile instructions.
+ let mut warnings = Vec::new(); + let mut errors = Vec::new(); + for block in function.block_iter(self.context) { + self.insert_block_label(block); + for instr_val in block.instruction_iter(self.context) { + check!( + self.compile_instruction(&block, &instr_val, func_is_entry), + return err(warnings, errors), + warnings, + errors + ); + } + } + + if !func_is_entry { + // Insert the end of function label. + self.cur_bytecode.push(Op::unowned_jump_label(end_label)); + + // Pop the call frame entry. + self.return_ctxs.pop(); + + // Free our stack allocated locals. This is unneeded for entries since they will have + // actually returned to the calling context via a VM RET. + self.drop_locals(function); + + // Restore $reta. + self.cur_bytecode.push(Op::register_move( + VirtualRegister::Constant(ConstantRegister::CallReturnAddress), + reta, + "restore reta", + None, + )); + + // Restore GP regs. + self.cur_bytecode.push(Op { + opcode: Either::Right(OrganizationalOp::PopAll(start_label)), + comment: "restore all regs".to_owned(), + owning_span: None, + }); + + // Jump to the return address. + self.cur_bytecode.push(Op::jump_to_register( + VirtualRegister::Constant(ConstantRegister::CallReturnAddress), + "return from call", + None, + )); + } + + // Save this function. + let mut ops = Vec::new(); + ops.append(&mut self.cur_bytecode); + if func_is_entry { + self.entries.push((function, start_label, ops)); + } else { + self.non_entries.push(ops); + } + + ok((), warnings, errors) + } + + fn compile_fn_call_args(&mut self, function: Function) { + // The first n args are passed in registers, but the rest arrive on the stack. + for (idx, (_, arg_val)) in function.args_iter(self.context).enumerate() { + if idx < compiler_constants::NUM_ARG_REGISTERS as usize { + // Make a copy of the args in case we make calls and need to use the arg registers. + let arg_copy_reg = self.reg_seqr.next(); + self.cur_bytecode.push(Op::register_move( + arg_copy_reg.clone(), + VirtualRegister::Constant(ConstantRegister::ARG_REGS[idx]), + format!("save arg {idx}"), + self.md_mgr.val_to_span(self.context, *arg_val), + )); + + // Remember our arg copy. + self.reg_map.insert(*arg_val, arg_copy_reg); + } else { + todo!( + "can't do more than {} args yet", + compiler_constants::NUM_ARG_REGISTERS + ); + } + } + } + + // Handle loading the arguments of a contract call + fn compile_external_args(&mut self, function: Function, from_script_data: bool) { + match function.args_iter(self.context).count() { + // Nothing to do if there are no arguments + 0 => (), + + // A special case for when there's only a single arg, its value (or address) is placed + // directly in the base register. + 1 => { + let (_, val) = function.args_iter(self.context).next().unwrap(); + let single_arg_reg = self.value_to_register(val); + if !from_script_data { + // The 'base' actually contains the argument. + self.read_args_base_from_frame(&single_arg_reg); + } else { + self.read_args_base_from_script_data(&single_arg_reg); + + // The base is an offset. Dereference it. + if val.get_type(self.context).unwrap().is_copy_type() { + self.cur_bytecode.push(Op { + opcode: either::Either::Left(VirtualOp::LW( + single_arg_reg.clone(), + single_arg_reg.clone(), + VirtualImmediate12 { value: 0 }, + )), + comment: "load main fn parameter".into(), + owning_span: None, + }); + } + } + } + + // Otherwise, the args are bundled together and pointed to by the base register. 
+            _ => {
+                let args_base_reg = self.reg_seqr.next();
+                if !from_script_data {
+                    self.read_args_base_from_frame(&args_base_reg);
+                } else {
+                    self.read_args_base_from_script_data(&args_base_reg);
+                }
+
+                // Successively load each argument. The asm generated depends on the arg type size
+                // and whether the offset fits in a 12-bit immediate.
+                let mut arg_word_offset = 0;
+                for (name, val) in function.args_iter(self.context) {
+                    let current_arg_reg = self.value_to_register(val);
+                    let arg_type = val.get_type(self.context).unwrap();
+                    let arg_type_size_bytes = ir_type_size_in_bytes(self.context, &arg_type);
+                    if arg_type.is_copy_type() {
+                        if arg_word_offset > compiler_constants::TWELVE_BITS {
+                            let offs_reg = self.reg_seqr.next();
+                            self.cur_bytecode.push(Op {
+                                opcode: Either::Left(VirtualOp::ADD(
+                                    args_base_reg.clone(),
+                                    args_base_reg.clone(),
+                                    offs_reg.clone(),
+                                )),
+                                comment: format!("get offset for arg {}", name),
+                                owning_span: None,
+                            });
+                            self.cur_bytecode.push(Op {
+                                opcode: Either::Left(VirtualOp::LW(
+                                    current_arg_reg.clone(),
+                                    offs_reg,
+                                    VirtualImmediate12 { value: 0 },
+                                )),
+                                comment: format!("get arg {}", name),
+                                owning_span: None,
+                            });
+                        } else {
+                            self.cur_bytecode.push(Op {
+                                opcode: Either::Left(VirtualOp::LW(
+                                    current_arg_reg.clone(),
+                                    args_base_reg.clone(),
+                                    VirtualImmediate12 {
+                                        value: arg_word_offset as u16,
+                                    },
+                                )),
+                                comment: format!("get arg {}", name),
+                                owning_span: None,
+                            });
+                        }
+                    } else if arg_word_offset * 8 > compiler_constants::TWELVE_BITS {
+                        let offs_reg = self.reg_seqr.next();
+                        self.number_to_reg(arg_word_offset * 8, &offs_reg, None);
+                        self.cur_bytecode.push(Op {
+                            opcode: either::Either::Left(VirtualOp::ADD(
+                                current_arg_reg.clone(),
+                                args_base_reg.clone(),
+                                offs_reg,
+                            )),
+                            comment: format!("get offset for arg {}", name),
+                            owning_span: None,
+                        });
+                    } else {
+                        self.cur_bytecode.push(Op {
+                            opcode: either::Either::Left(VirtualOp::ADDI(
+                                current_arg_reg.clone(),
+                                args_base_reg.clone(),
+                                VirtualImmediate12 {
+                                    value: (arg_word_offset * 8) as u16,
+                                },
+                            )),
+                            comment: format!("get address for arg {}", name),
+                            owning_span: None,
+                        });
+                    }
+
+                    arg_word_offset += size_bytes_in_words!(arg_type_size_bytes);
+                }
+            }
+        }
+    }
+
+    // Read the argument(s) base from the call frame.
+    fn read_args_base_from_frame(&mut self, reg: &VirtualRegister) {
+        self.cur_bytecode.push(Op {
+            opcode: Either::Left(VirtualOp::LW(
+                reg.clone(),
+                VirtualRegister::Constant(ConstantRegister::FramePointer),
+                // see https://github.com/FuelLabs/fuel-specs/pull/193#issuecomment-876496372
+                VirtualImmediate12 { value: 74 },
+            )),
+            comment: "base register for method parameter".into(),
+            owning_span: None,
+        });
+    }
+
+    // Read the argument(s) base from the script data.
+    fn read_args_base_from_script_data(&mut self, reg: &VirtualRegister) {
+        self.cur_bytecode.push(Op {
+            opcode: either::Either::Left(VirtualOp::GTF(
+                reg.clone(),
+                VirtualRegister::Constant(ConstantRegister::Zero),
+                VirtualImmediate12 {
+                    value: GTFArgs::ScriptData as u16,
+                },
+            )),
+            comment: "base register for main fn parameter".into(),
+            owning_span: None,
+        });
+    }
+
+    fn init_locals(&mut self, function: Function) {
+        // If they're immutable and have a constant initialiser then they go in the data section.
+        // Otherwise they go in runtime allocated space, either a register or on the stack.
+        //
+        // Stack offsets are in words to both enforce alignment and simplify use with LW/SW.
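+        // For example (an illustrative layout, not tied to any particular
+        // program): locals `b: bool`, `h: b256` and `a: [u64; 3]`, declared in
+        // that order, get word offsets 0, 1 and 5, and CFEI reserves 8 * 8 bytes.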
+ let mut stack_base = 0_u64; + for (_name, ptr) in function.locals_iter(self.context) { + if !ptr.is_mutable(self.context) && ptr.get_initializer(self.context).is_some() { + let constant = ptr.get_initializer(self.context).unwrap(); + let data_id = self + .data_section + .insert_data_value(Entry::from_constant(self.context, constant)); + self.ptr_map.insert(*ptr, Storage::Data(data_id)); + } else { + match ptr.get_type(self.context) { + Type::Unit | Type::Bool | Type::Uint(_) | Type::Pointer(_) => { + self.ptr_map.insert(*ptr, Storage::Stack(stack_base)); + stack_base += 1; + } + Type::B256 => { + // XXX Like strings, should we just reserve space for a pointer? + self.ptr_map.insert(*ptr, Storage::Stack(stack_base)); + stack_base += 4; + } + Type::String(n) => { + // Strings are always constant and used by reference, so we only store the + // pointer on the stack. + self.ptr_map.insert(*ptr, Storage::Stack(stack_base)); + stack_base += size_bytes_round_up_to_word_alignment!(n) + } + ty @ (Type::Array(_) | Type::Struct(_) | Type::Union(_)) => { + // Store this aggregate at the current stack base. + self.ptr_map.insert(*ptr, Storage::Stack(stack_base)); + + // Reserve space by incrementing the base. + stack_base += size_bytes_in_words!(ir_type_size_in_bytes(self.context, ty)); + } + }; + } + } + + // Reserve space on the stack (in bytes) for all our locals which require it. Firstly save + // the current $sp. + let locals_base_reg = self.reg_seqr.next(); + self.cur_bytecode.push(Op::register_move( + locals_base_reg.clone(), + VirtualRegister::Constant(ConstantRegister::StackPointer), + "save locals base register", + None, + )); + + let locals_size = stack_base * 8; + if locals_size != 0 { + if locals_size > compiler_constants::TWENTY_FOUR_BITS { + todo!("Enormous stack usage for locals."); + } + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::CFEI(VirtualImmediate24 { + value: locals_size as u32, + })), + comment: format!("allocate {} bytes for locals", locals_size), + owning_span: None, + }); + } + self.locals_ctxs.push((locals_size, locals_base_reg)); + } + + fn drop_locals(&mut self, _function: Function) { + let (locals_size, _locals_base_reg) = self + .locals_ctxs + .pop() + .expect("Calls guaranteed to save locals context."); + if locals_size != 0 { + if locals_size > compiler_constants::TWENTY_FOUR_BITS { + todo!("Enormous stack usage for locals."); + } + self.cur_bytecode.push(Op { + opcode: Either::Left(VirtualOp::CFSI(VirtualImmediate24 { + value: locals_size as u32, + })), + comment: format!("free {} bytes for locals", locals_size), + owning_span: None, + }); + } + } + + pub(super) fn locals_base_reg(&self) -> &VirtualRegister { + &self.locals_ctxs.last().expect("No locals").1 + } +} diff --git a/sway-core/src/asm_generation/compiler_constants.rs b/sway-core/src/asm_generation/compiler_constants.rs index 1cfe092032a..dbce09410c8 100644 --- a/sway-core/src/asm_generation/compiler_constants.rs +++ b/sway-core/src/asm_generation/compiler_constants.rs @@ -2,16 +2,37 @@ /// to use. Registers reserved by the compiler are contained within these. const NUM_TOTAL_REGISTERS: u8 = 64; const NUM_FREE_REGISTERS: u8 = 48; -pub(crate) const TWENTY_FOUR_BITS: u64 = 0b1111_1111_1111_1111_1111_1111; -pub(crate) const EIGHTEEN_BITS: u64 = 0b11_1111_1111_1111_1111; -pub(crate) const TWELVE_BITS: u64 = 0b1111_1111_1111; -pub(crate) const SIX_BITS: u64 = 0b11_1111; /// This is the number of registers reserved by the compiler. Adjust this number if a new /// reservation must be made. 
 /// So far, the compiler-reserved registers are:
-/// 1. DATA_SECTION_BEGIN
-const NUM_COMPILER_RESERVED_REGISTERS: u8 = 1;
+/// 1. DATA_SECTION_BEGIN - the offset to the read-only data section.
+/// 2. RETURN_ADDRESS - where a function must return to.
+/// 3. RETURN_VALUE - the value returned by a _function_ call.
+/// 4. SCRATCH - used for certain operations which need a register temporarily, such as JMP.
+/// 5. ARGS - for passing arguments to function calls.
+const NUM_COMPILER_RESERVED_REGISTERS: u8 = 4 + NUM_ARG_REGISTERS;
+
 pub(crate) const DATA_SECTION_REGISTER: u8 = NUM_TOTAL_REGISTERS - 1;
+pub(crate) const RETURN_ADDRESS_REGISTER: u8 = NUM_TOTAL_REGISTERS - 2;
+pub(crate) const RETURN_VALUE_REGISTER: u8 = NUM_TOTAL_REGISTERS - 3;
+pub(crate) const SCRATCH_REGISTER: u8 = NUM_TOTAL_REGISTERS - 4;
+
+pub(crate) const NUM_ARG_REGISTERS: u8 = 6;
+pub(crate) const ARG_REG0: u8 = NUM_TOTAL_REGISTERS - 5;
+pub(crate) const ARG_REG1: u8 = NUM_TOTAL_REGISTERS - 6;
+pub(crate) const ARG_REG2: u8 = NUM_TOTAL_REGISTERS - 7;
+pub(crate) const ARG_REG3: u8 = NUM_TOTAL_REGISTERS - 8;
+pub(crate) const ARG_REG4: u8 = NUM_TOTAL_REGISTERS - 9;
+pub(crate) const ARG_REG5: u8 = NUM_TOTAL_REGISTERS - 10;
+
 pub(crate) const NUM_ALLOCATABLE_REGISTERS: u8 =
     NUM_FREE_REGISTERS - NUM_COMPILER_RESERVED_REGISTERS;
+
+pub(crate) const TWENTY_FOUR_BITS: u64 = 0b1111_1111_1111_1111_1111_1111;
+pub(crate) const EIGHTEEN_BITS: u64 = 0b11_1111_1111_1111_1111;
+pub(crate) const TWELVE_BITS: u64 = 0b1111_1111_1111;
+pub(crate) const SIX_BITS: u64 = 0b11_1111;
+
+/// An arbitrary value used for error codes.
+pub(crate) const MISMATCHED_SELECTOR_REVERT_CODE: u32 = 123;
diff --git a/sway-core/src/asm_generation/data_section.rs b/sway-core/src/asm_generation/data_section.rs
index 1ac29e5c42c..b9a5e12cbd6 100644
--- a/sway-core/src/asm_generation/data_section.rs
+++ b/sway-core/src/asm_generation/data_section.rs
@@ -1,7 +1,149 @@
-use crate::{parse_tree::*, type_system::*};
+use crate::asm_generation::from_ir::ir_type_size_in_bytes;
+
+use sway_ir::{AggregateContent, Constant, ConstantValue, Context, Type};
+
 use std::fmt::{self, Write};

-type Data = Literal;
+// An entry in the data section. It's important for the size to be correct, especially for unions
+// where the size could be larger than the represented value.
+#[derive(Clone, Debug)]
+pub struct Entry {
+    value: Datum,
+    size: usize,
+}
+
+#[derive(Clone, Debug)]
+pub enum Datum {
+    Word(u64),
+    ByteArray(Vec<u8>),
+    Collection(Vec<Entry>),
+}
+
+impl Entry {
+    pub(crate) fn new_word(value: u64, size: Option<usize>) -> Entry {
+        Entry {
+            value: Datum::Word(value),
+            size: size.unwrap_or(8),
+        }
+    }
+
+    pub(crate) fn new_byte_array(bytes: Vec<u8>, size: Option<usize>) -> Entry {
+        let size = size.unwrap_or(bytes.len());
+        Entry {
+            value: Datum::ByteArray(bytes),
+            size,
+        }
+    }
+
+    pub(crate) fn new_collection(elements: Vec<Entry>, size: Option<usize>) -> Entry {
+        let size = size.unwrap_or_else(|| elements.iter().map(|el| el.size).sum());
+        Entry {
+            value: Datum::Collection(elements),
+            size,
+        }
+    }
+
+    pub(crate) fn from_constant(context: &Context, constant: &Constant) -> Entry {
+        // We have to do some painful special handling here for enums, which are tagged unions.
+        // This really should be handled by the IR more explicitly and is something that will
+        // hopefully be addressed by https://github.com/FuelLabs/sway/issues/2819#issuecomment-1256930392
+        let size = Some(ir_type_size_in_bytes(context, &constant.ty) as usize);
+
+        // Is this constant a tagged union?
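+        // E.g., for a hypothetical `enum E { A: u64, B: b256 }`, the constant
+        // `E::A(1)` arrives as a struct of a u64 tag and a union value, and the
+        // value entry below must still be padded to the union's 32-byte size.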
+        if let Type::Struct(struct_agg) = &constant.ty {
+            if let AggregateContent::FieldTypes(field_tys) = struct_agg.get_content(context) {
+                if field_tys.len() == 2
+                    && matches!(
+                        (field_tys[0], field_tys[1]),
+                        (Type::Uint(_), Type::Union(_))
+                    )
+                {
+                    // OK, this looks very much like a tagged union enum, which is the only place
+                    // we use unions (otherwise we should be generalising this a bit more).
+                    if let ConstantValue::Struct(els) = &constant.value {
+                        if els.len() == 2 {
+                            let tag_entry = Entry::from_constant(context, &els[0]);
+
+                            // Here's the special case. We need to get the size of the union and
+                            // attach it to this constant entry which will be one of the variants.
+                            let mut val_entry = Entry::from_constant(context, &els[1]);
+                            val_entry.size = ir_type_size_in_bytes(context, &field_tys[1]) as usize;
+
+                            // Return here from our special case.
+                            return Entry::new_collection(vec![tag_entry, val_entry], size);
+                        }
+                    }
+                }
+            }
+        };
+
+        // Not a tagged union, no trickiness required.
+        match &constant.value {
+            ConstantValue::Undef | ConstantValue::Unit => Entry::new_word(0, size),
+            ConstantValue::Bool(b) => Entry::new_word(if *b { 1 } else { 0 }, size),
+            ConstantValue::Uint(u) => Entry::new_word(*u, size),
+
+            ConstantValue::B256(bs) => Entry::new_byte_array(bs.to_vec(), size),
+            ConstantValue::String(bs) => Entry::new_byte_array(bs.clone(), size),
+
+            ConstantValue::Array(els) | ConstantValue::Struct(els) => Entry::new_collection(
+                els.iter()
+                    .map(|el| Entry::from_constant(context, el))
+                    .collect(),
+                size,
+            ),
+        }
+    }
+
+    /// Converts the entry to a big-endian byte representation, padded out to whole words.
+    pub(crate) fn to_bytes(&self) -> Vec<u8> {
+        // Get the big-endian byte representation of the basic value.
+        let mut bytes = match &self.value {
+            Datum::Word(w) => w.to_be_bytes().to_vec(),
+            Datum::ByteArray(bs) if bs.len() % 8 == 0 => bs.clone(),
+            Datum::ByteArray(bs) => bs
+                .iter()
+                .chain(vec![0; 8].iter())
+                .copied()
+                .take((bs.len() + 7) & 0xfffffff8_usize)
+                .collect(),
+            Datum::Collection(els) => els.iter().flat_map(|el| el.to_bytes()).collect(),
+        };
+
+        // Pad the size out to match the specified size.
+        if self.size > bytes.len() {
+            let mut pad = vec![0; self.size - bytes.len()];
+            pad.append(&mut bytes);
+            bytes = pad;
+        }
+
+        bytes
+    }
+
+    pub(crate) fn has_copy_type(&self) -> bool {
+        matches!(self.value, Datum::Word(_))
+    }
+
+    pub(crate) fn equiv(&self, entry: &Entry) -> bool {
+        fn equiv_data(lhs: &Datum, rhs: &Datum) -> bool {
+            match (lhs, rhs) {
+                (Datum::Word(l), Datum::Word(r)) => l == r,
+                (Datum::ByteArray(l), Datum::ByteArray(r)) => l == r,
+
+                (Datum::Collection(l), Datum::Collection(r)) => {
+                    l.len() == r.len()
+                        && l.iter()
+                            .zip(r.iter())
+                            .all(|(l, r)| equiv_data(&l.value, &r.value))
+                }
+
+                _ => false,
+            }
+        }
+
+        equiv_data(&self.value, &entry.value)
+    }
+}

 /// An address which refers to a value in the data section of the asm.
 #[derive(Clone, Debug)]
@@ -16,7 +158,7 @@ impl fmt::Display for DataId {
 #[derive(Default, Clone, Debug)]
 pub struct DataSection {
     /// the data to be put in the data section of the asm
-    pub value_pairs: Vec<Data>,
+    pub value_pairs: Vec<Entry>,
 }

 impl DataSection {
@@ -33,15 +175,17 @@ impl DataSection {
     pub(crate) fn serialize_to_bytes(&self) -> Vec<u8> {
         // not the exact right capacity but serves as a lower bound
         let mut buf = Vec::with_capacity(self.value_pairs.len());
-        for val in &self.value_pairs {
-            buf.append(&mut val.to_bytes().to_vec());
+        for entry in &self.value_pairs {
+            buf.append(&mut entry.to_bytes());
         }
         buf
     }

-    /// Calculates the return type of the data held at a specific [DataId].
-    pub(crate) fn type_of_data(&self, id: &DataId) -> Option<TypeInfo> {
-        self.value_pairs.get(id.0 as usize).map(|x| x.as_type())
+    /// Returns whether a specific [DataId] value has a copy type (fits in a register).
+    pub(crate) fn has_copy_type(&self, id: &DataId) -> Option<bool> {
+        self.value_pairs
+            .get(id.0 as usize)
+            .map(|entry| entry.has_copy_type())
     }

     /// When generating code, sometimes a hard-coded data pointer is needed to reference
@@ -51,19 +195,23 @@ impl DataSection {
     /// `pointer_value` is in _bytes_ and refers to the offset from instruction start to the data
     /// in question.
     pub(crate) fn append_pointer(&mut self, pointer_value: u64) -> DataId {
-        let pointer_as_data = Literal::new_pointer_literal(pointer_value);
-        self.insert_data_value(&pointer_as_data)
+        // The 'pointer' is just a literal 64 bit address.
+        self.insert_data_value(Entry::new_word(pointer_value, None))
     }

     /// Given any data in the form of a [Literal] (using this type mainly because it includes type
     /// information and debug spans), insert it into the data section and return its offset as a
     /// [DataId].
-    pub(crate) fn insert_data_value(&mut self, data: &Literal) -> DataId {
+    pub(crate) fn insert_data_value(&mut self, new_entry: Entry) -> DataId {
         // if there is an identical data value, use the same id
-        match self.value_pairs.iter().position(|x| x == data) {
+        match self
+            .value_pairs
+            .iter()
+            .position(|entry| entry.equiv(&new_entry))
+        {
             Some(num) => DataId(num as u32),
             None => {
-                self.value_pairs.push(data.clone());
+                self.value_pairs.push(new_entry);
                 // the index of the data section where the value is stored
                 DataId((self.value_pairs.len() - 1) as u32)
             }
@@ -73,27 +221,40 @@ impl DataSection {

 impl fmt::Display for DataSection {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        let mut data_buf = String::new();
-        for (ix, data) in self.value_pairs.iter().enumerate() {
-            let data_val = match data {
-                Literal::U8(num) => format!(".u8 {:#04x}", num),
-                Literal::U16(num) => format!(".u16 {:#04x}", num),
-                Literal::U32(num) => format!(".u32 {:#04x}", num),
-                Literal::U64(num) => format!(".u64 {:#04x}", num),
-                Literal::Numeric(num) => format!(".u64 {:#04x}", num),
-                Literal::Boolean(b) => format!(".bool {}", if *b { "0x01" } else { "0x00" }),
-                Literal::String(st) => format!(".str \"{}\"", st.as_str()),
-                Literal::Byte(b) => format!(".byte {:#08b}", b),
-                Literal::B256(b) => format!(
-                    ".b256 0x{}",
-                    b.iter()
-                        .map(|x| format!("{:02x}", x))
+        fn display_entry(datum: &Datum) -> String {
+            match datum {
+                Datum::Word(w) => format!(".word {w}"),
+                Datum::ByteArray(bs) => {
+                    let mut hex_str = String::new();
+                    let mut chr_str = String::new();
+                    for b in bs {
+                        hex_str.push_str(format!("{b:02x} ").as_str());
+                        chr_str.push(if *b == b' ' || b.is_ascii_graphic() {
+                            *b as char
+                        } else {
+                            '.'
+                        });
+                    }
+                    format!(".bytes[{}] {hex_str} {chr_str}", bs.len())
+                }
+                Datum::Collection(els) => format!(
+                    ".collection {{ {} }}",
+                    els.iter()
+                        .map(|el| display_entry(&el.value))
                         .collect::<Vec<_>>()
-                        .join("")
+                        .join(", ")
                 ),
-            };
-            let data_label = DataId(ix as u32);
-            writeln!(data_buf, "{} {}", data_label, data_val)?;
+            }
+        }
+
+        let mut data_buf = String::new();
+        for (ix, entry) in self.value_pairs.iter().enumerate() {
+            writeln!(
+                data_buf,
+                "{} {}",
+                DataId(ix as u32),
+                display_entry(&entry.value)
+            )?;
         }

         write!(f, ".data:\n{}", data_buf)
diff --git a/sway-core/src/asm_generation/finalized_asm.rs b/sway-core/src/asm_generation/finalized_asm.rs
index 890aeb3699f..567ee58691c 100644
--- a/sway-core/src/asm_generation/finalized_asm.rs
+++ b/sway-core/src/asm_generation/finalized_asm.rs
@@ -99,9 +99,8 @@ fn to_bytecode_mut(
         .fold(0, |acc, item| match &item.opcode {
             AllocatedOpcode::LWDataId(_reg, data_label)
                 if !data_section
-                    .type_of_data(data_label)
-                    .expect("data label references non existent data -- internal error")
-                    .is_copy_type() =>
+                    .has_copy_type(data_label)
+                    .expect("data label references non existent data -- internal error") =>
             {
                 acc + 8
             }
diff --git a/sway-core/src/asm_generation/from_ir.rs b/sway-core/src/asm_generation/from_ir.rs
index e9409dd0d0d..4ad2edaf848 100644
--- a/sway-core/src/asm_generation/from_ir.rs
+++ b/sway-core/src/asm_generation/from_ir.rs
@@ -1,160 +1,127 @@
-// =================================================================================================
-// Newer IR code gen.
-//
-// NOTE: This is converting IR to Vec<Op> first, and then to finalized VM bytecode much like the
-// original code. This is to keep things simple, and to reuse the current tools like DataSection.
-//
-// But this is not ideal and needs to be refactored:
-// - AsmNamespace is tied to data structures from other stages like Ident and Literal.
-
-use fuel_asm::GTFArgs;
-use fuel_crypto::Hasher;
-use std::{collections::HashMap, sync::Arc};
-
-use crate::{
-    asm_generation::{
-        build_contract_abi_switch, build_preamble, checks::check_invalid_opcodes,
-        compiler_constants, finalized_asm::FinalizedAsm, register_sequencer::RegisterSequencer,
-        AbstractInstructionSet, DataId, DataSection, SwayAsmSet,
-    },
-    asm_lang::{
-        virtual_register::*, Label, Op, VirtualImmediate12, VirtualImmediate18, VirtualImmediate24,
-        VirtualOp,
-    },
-    error::*,
-    metadata::MetadataManager,
-    parse_tree::Literal,
-    BuildConfig,
+use super::{
+    asm_builder::AsmBuilder,
+    checks::check_invalid_opcodes,
+    finalized_asm::FinalizedAsm,
+    programs::{AbstractProgram, ProgramKind},
+    register_sequencer::RegisterSequencer,
+    DataId, DataSection,
 };

-use sway_ir::*;
-use sway_types::{span::Span, Spanned};
+use crate::{error::*, BuildConfig};

-use either::Either;
+use sway_ir::*;

 pub fn compile_ir_to_asm(
     ir: &Context,
     build_config: Option<&BuildConfig>,
 ) -> CompileResult<FinalizedAsm> {
-    let mut warnings: Vec<CompileWarning> = Vec::new();
-    let mut errors: Vec<CompileError> = Vec::new();
-
-    let mut reg_seqr = RegisterSequencer::new();
-    let mut bytecode: Vec<Op> = build_preamble(&mut reg_seqr).to_vec();
-
     // Eventually when we get this 'correct' with no hacks we'll want to compile all the modules
     // separately and then use a linker to connect them. This way we could also keep binary caches
-    // of libraries and link against them, rather than recompile everything each time.
+    // of libraries and link against them, rather than recompile everything each time. For now we
+    // assume there is one module.
     assert!(ir.module_iter().count() == 1);
+
+    let mut warnings: Vec<CompileWarning> = Vec::new();
+    let mut errors: Vec<CompileError> = Vec::new();
+
     let module = ir.module_iter().next().unwrap();
-    let (data_section, mut ops, mut reg_seqr) = check!(
-        compile_module_to_asm(reg_seqr, ir, module),
+    let abstract_program = check!(
+        compile_module_to_asm(RegisterSequencer::new(), ir, module),
         return err(warnings, errors),
         warnings,
         errors
     );
-    bytecode.append(&mut ops);

-    let asm = match module.get_kind(ir) {
-        Kind::Script => SwayAsmSet::ScriptMain {
-            program_section: AbstractInstructionSet { ops: bytecode },
-            data_section,
-        },
-        Kind::Contract => SwayAsmSet::ContractAbi {
-            program_section: AbstractInstructionSet { ops: bytecode },
-            data_section,
-        },
-        Kind::Library | Kind::Predicate => todo!("libraries and predicates coming soon!"),
-    };
+    if build_config
+        .map(|cfg| cfg.print_intermediate_asm)
+        .unwrap_or(false)
+    {
+        println!(";; --- ABSTRACT VIRTUAL PROGRAM ---\n");
+        println!("{abstract_program}\n");
+    }
+
+    let allocated_program = abstract_program.into_allocated_program();

     if build_config
         .map(|cfg| cfg.print_intermediate_asm)
        .unwrap_or(false)
     {
-        tracing::info!("{}", asm);
+        println!(";; --- ABSTRACT ALLOCATED PROGRAM ---\n");
+        println!("{allocated_program}");
     }

-    let finalized_asm = asm
-        .remove_unnecessary_jumps()
-        .allocate_registers(&mut reg_seqr)
-        .optimize();
+    let final_program = allocated_program.into_final_program();

     if build_config
         .map(|cfg| cfg.print_finalized_asm)
         .unwrap_or(false)
     {
-        tracing::info!("{}", finalized_asm);
+        println!(";; --- FINAL PROGRAM ---\n");
+        println!("{final_program}");
     }

+    let final_asm = final_program.finalize();
+
     check!(
-        check_invalid_opcodes(&finalized_asm),
+        check_invalid_opcodes(&final_asm),
         return err(warnings, errors),
         warnings,
         errors
     );

-    ok(finalized_asm, warnings, errors)
+    ok(final_asm, warnings, errors)
 }

 fn compile_module_to_asm(
     reg_seqr: RegisterSequencer,
     context: &Context,
     module: Module,
-) -> CompileResult<(DataSection, Vec<Op>, RegisterSequencer)> {
+) -> CompileResult<AbstractProgram> {
     let mut builder = AsmBuilder::new(DataSection::default(), reg_seqr, context);

-    match module.get_kind(context) {
-        Kind::Script => {
-            // We can't do function calls yet, so we expect everything to be inlined into `main`.
-            let function = module
-                .function_iter(context)
-                .find(|func| &context.functions[func.0].name == "main")
-                .expect("Can't find main function!");
-            builder
-                .compile_function(function)
-                .flat_map(|_| builder.finalize())
-        }
-        Kind::Contract => {
-            let mut warnings = Vec::new();
-            let mut errors = Vec::new();
+    // Pre-create labels for all functions before we generate other code, so we can call them
+    // before compiling them if needed.
+    for func in module.function_iter(context) {
+        builder.func_to_labels(&func);
+    }

-            let mut selectors_and_labels: Vec<([u8; 4], Label)> = Vec::new();
+    let mut warnings = Vec::new();
+    let mut errors = Vec::new();

-            // Compile only the functions which have selectors and gather the selectors and labels.
- for function in module.function_iter(context) { - if function.has_selector(context) { - let selector = function.get_selector(context).unwrap(); - let label = builder.add_label(); - check!( - builder.compile_function(function), - return err(warnings, errors), - warnings, - errors - ); - selectors_and_labels.push((selector, label)); - } - } - let (mut data_section, mut funcs_bytecode, mut reg_seqr) = check!( - builder.finalize(), - return err(warnings, errors), - warnings, - errors - ); + let mut warnings = Vec::new(); + let mut errors = Vec::new(); - let mut bytecode_with_switch = - build_contract_abi_switch(&mut reg_seqr, &mut data_section, selectors_and_labels); - bytecode_with_switch.append(&mut funcs_bytecode); - ok( - (data_section, bytecode_with_switch, reg_seqr), - warnings, - errors, - ) - } - Kind::Library | Kind::Predicate => todo!("libraries and predicates coming soon!"), + for function in module.function_iter(context) { + check!( + builder.compile_function(function), + return err(warnings, errors), + warnings, + errors + ); } + + // Get the compiled result and massage a bit for the AbstractProgram. + let (data_section, reg_seqr, entries, non_entries) = builder.finalize(); + let entries = entries + .into_iter() + .map(|(func, label, ops)| { + let selector = func.get_selector(context); + (selector, label, ops) + }) + .collect(); + let kind = match module.get_kind(context) { + Kind::Contract => ProgramKind::Contract, + Kind::Script => ProgramKind::Script, + Kind::Library | Kind::Predicate => todo!("libraries and predicates coming soon!"), + }; + + ok( + AbstractProgram::new(kind, data_section, entries, non_entries, reg_seqr), + warnings, + errors, + ) } // ------------------------------------------------------------------------------------------------- +#[macro_export] macro_rules! size_bytes_in_words { ($bytes_expr: expr) => { ($bytes_expr + 7) / 8 @@ -162,40 +129,13 @@ macro_rules! size_bytes_in_words { } // This is a mouthful... +#[macro_export] macro_rules! size_bytes_round_up_to_word_alignment { ($bytes_expr: expr) => { ($bytes_expr + 7) - (($bytes_expr + 7) % 8) }; } -struct AsmBuilder<'ir> { - // Data section is used by the rest of code gen to layout const memory. - data_section: DataSection, - - // Register sequencer dishes out new registers and labels. - reg_seqr: RegisterSequencer, - - // Label map is from IR block to label name. - label_map: HashMap, - - // Reg map is tracking IR values to VM values. Ptr map is tracking IR pointers to local - // storage types. - reg_map: HashMap, - ptr_map: HashMap, - - // Stack base register, copied from $SP at the start, but only if we have stack storage. - stack_base_reg: Option, - - // IR context we're compiling. - context: &'ir Context, - - // Metadata manager for converting metadata to Spans, etc. - md_mgr: MetadataManager, - - // Final resulting VM bytecode ops. - bytecode: Vec, -} - // NOTE: For stack storage we need to be aware: // - sizes are in bytes; CFEI reserves in bytes. // - offsets are in 64-bit words; LW/SW reads/writes to word offsets. XXX Wrap in a WordOffset struct. 
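 // For example (an illustrative case): a local at word offset 3 is read with
 // `LW $r, $base, 3`, while MCP/MCPI and CFEI want byte quantities, so its byte
 // address is `$base + 3 * 8`.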
@@ -211,2310 +151,20 @@ pub enum StateAccessType { Write, } -impl<'ir> AsmBuilder<'ir> { - fn new(data_section: DataSection, reg_seqr: RegisterSequencer, context: &'ir Context) -> Self { - AsmBuilder { - data_section, - reg_seqr, - label_map: HashMap::new(), - reg_map: HashMap::new(), - ptr_map: HashMap::new(), - stack_base_reg: None, - context, - md_mgr: MetadataManager::default(), - bytecode: Vec::new(), - } - } - - // This is here temporarily for in the case when the IR can't absolutely provide a valid span, - // until we can improve ASM block parsing and verification mostly. It's where it's needed the - // most, for returning failure errors. If we move ASM verification to the parser and semantic - // analysis then ASM block conversion shouldn't/can't fail and we won't need to provide a - // guaranteed to be available span. - fn empty_span() -> Span { - let msg = "unknown source location"; - Span::new(Arc::from(msg), 0, msg.len(), None).unwrap() - } - - // Handle loading the arguments of a contract call - fn compile_fn_args(&mut self, function: Function) { - // We treat contract methods differently. Contract methods have selectors. - let is_contract_method = function.has_selector(self.context); - - match function.args_iter(self.context).count() { - // Nothing to do if there are no arguments - 0 => (), - - // A special case for when there's only a single arg, its value (or address) is placed - // directly in the base register. - 1 => { - let (_, val) = function.args_iter(self.context).next().unwrap(); - let single_arg_reg = self.value_to_register(val); - - if is_contract_method { - self.read_args_value_from_frame(&single_arg_reg); - } else { - self.read_args_value_from_script_data(&single_arg_reg); - - if val.get_type(self.context).unwrap().is_copy_type() { - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::LW( - single_arg_reg.clone(), - single_arg_reg.clone(), - VirtualImmediate12 { value: 0 }, - )), - comment: "Load main fn parameter".into(), - owning_span: None, - }); - } - } - } - - // Otherwise, the args are bundled together and pointed to by the base register. - _ => { - let args_base_reg = self.reg_seqr.next(); - - if is_contract_method { - self.read_args_value_from_frame(&args_base_reg); - } else { - self.read_args_value_from_script_data(&args_base_reg); - } - - // Successively load each argument. The asm generated depends on the arg type size - // and whether the offset fits in a 12-bit immediate. 
- let mut arg_word_offset = 0; - for (name, val) in function.args_iter(self.context) { - let current_arg_reg = self.value_to_register(val); - let arg_type = val.get_type(self.context).unwrap(); - let arg_type_size_bytes = ir_type_size_in_bytes(self.context, &arg_type); - if arg_type.is_copy_type() { - if arg_word_offset > compiler_constants::TWELVE_BITS { - let offs_reg = self.reg_seqr.next(); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::ADD( - args_base_reg.clone(), - args_base_reg.clone(), - offs_reg.clone(), - )), - comment: format!("Get offset for arg {}", name), - owning_span: None, - }); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::LW( - current_arg_reg.clone(), - offs_reg, - VirtualImmediate12 { value: 0 }, - )), - comment: format!("Get arg {}", name), - owning_span: None, - }); - } else { - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::LW( - current_arg_reg.clone(), - args_base_reg.clone(), - VirtualImmediate12 { - value: arg_word_offset as u16, - }, - )), - comment: format!("Get arg {}", name), - owning_span: None, - }); - } - } else if arg_word_offset * 8 > compiler_constants::TWELVE_BITS { - let offs_reg = self.reg_seqr.next(); - self.number_to_reg(arg_word_offset * 8, &offs_reg, None); - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADD( - current_arg_reg.clone(), - args_base_reg.clone(), - offs_reg, - )), - comment: format!("Get offset or arg {}", name), - owning_span: None, - }); - } else { - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - current_arg_reg.clone(), - args_base_reg.clone(), - VirtualImmediate12 { - value: (arg_word_offset * 8) as u16, - }, - )), - comment: format!("Get address for arg {}", name), - owning_span: None, - }); - } - - arg_word_offset += size_bytes_in_words!(arg_type_size_bytes); - } - } - } - } - - // Read the argument(s) base from the call frame. - fn read_args_value_from_frame(&mut self, reg: &VirtualRegister) { - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::LW( - reg.clone(), - VirtualRegister::Constant(ConstantRegister::FramePointer), - // see https://github.com/FuelLabs/fuel-specs/pull/193#issuecomment-876496372 - VirtualImmediate12 { value: 74 }, - )), - comment: "Base register for method parameter".into(), - owning_span: None, - }); - } - - // Read the argument(s) base from the script data. - fn read_args_value_from_script_data(&mut self, reg: &VirtualRegister) { - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::GTF( - reg.clone(), - VirtualRegister::Constant(ConstantRegister::Zero), - VirtualImmediate12 { - value: GTFArgs::ScriptData as u16, - }, - )), - comment: "Base register for main fn parameter".into(), - owning_span: None, - }); - } - - fn add_locals(&mut self, function: Function) { - // If they're immutable and have a constant initialiser then they go in the data section. - // Otherwise they go in runtime allocated space, either a register or on the stack. - // - // Stack offsets are in words to both enforce alignment and simplify use with LW/SW. 
- let mut stack_base = 0_u64; - for (_name, ptr) in function.locals_iter(self.context) { - let ptr_content = &self.context.pointers[ptr.0]; - if !ptr_content.is_mutable && ptr_content.initializer.is_some() { - let constant = ptr_content.initializer.as_ref().unwrap(); - let lit = ir_constant_to_ast_literal(constant); - let data_id = self.data_section.insert_data_value(&lit); - self.ptr_map.insert(*ptr, Storage::Data(data_id)); - } else { - match ptr_content.ty { - Type::Unit | Type::Bool | Type::Uint(_) | Type::Pointer(_) => { - self.ptr_map.insert(*ptr, Storage::Stack(stack_base)); - stack_base += 1; - } - Type::B256 => { - // XXX Like strings, should we just reserve space for a pointer? - self.ptr_map.insert(*ptr, Storage::Stack(stack_base)); - stack_base += 4; - } - Type::String(n) => { - // Strings are always constant and used by reference, so we only store the - // pointer on the stack. - self.ptr_map.insert(*ptr, Storage::Stack(stack_base)); - stack_base += size_bytes_round_up_to_word_alignment!(n) - } - Type::Array(_) | Type::Struct(_) | Type::Union(_) => { - // Store this aggregate at the current stack base. - self.ptr_map.insert(*ptr, Storage::Stack(stack_base)); - - // Reserve space by incrementing the base. - stack_base += size_bytes_in_words!(ir_type_size_in_bytes( - self.context, - &ptr_content.ty - )); - } - }; - } - } - - // Reserve space on the stack for ALL our locals which require it. - if !self.ptr_map.is_empty() { - let base_reg = self.reg_seqr.next(); - self.bytecode.push(Op::unowned_register_move_comment( - base_reg.clone(), - VirtualRegister::Constant(ConstantRegister::StackPointer), - "save locals base register", - )); - - // It's possible (though undesirable) to have empty local data structures only. - if stack_base != 0 { - if stack_base * 8 > compiler_constants::TWENTY_FOUR_BITS { - todo!("Enormous stack usage for locals."); - } - let mut alloc_op = Op::unowned_stack_allocate_memory(VirtualImmediate24 { - value: (stack_base * 8) as u32, - }); - alloc_op.comment = format!("allocate {} bytes for all locals", stack_base * 8); - self.bytecode.push(alloc_op); - } - self.stack_base_reg = Some(base_reg); - } - } - - fn add_block_label(&mut self, block: Block) { - if &block.get_label(self.context) != "entry" { - let label = self.block_to_label(&block); - self.bytecode.push(Op::unowned_jump_label(label)) - } - } - - fn add_label(&mut self) -> Label { - let label = self.reg_seqr.get_label(); - self.bytecode.push(Op::unowned_jump_label(label.clone())); - label - } - - fn finalize(self) -> CompileResult<(DataSection, Vec, RegisterSequencer)> { - // XXX Assuming no warnings... - ok( - (self.data_section, self.bytecode, self.reg_seqr), - Vec::new(), - Vec::new(), - ) - } - - fn compile_function(&mut self, function: Function) -> CompileResult<()> { - if function.has_selector(self.context) { - // Add a comment noting that this is a named contract method. - self.bytecode.push(Op::new_comment(format!( - "contract method: {}, selector: 0x{}", - function.get_name(self.context), - function - .get_selector(self.context) - .unwrap() - .into_iter() - .map(|b| format!("{b:02x}")) - .collect::() - ))); - } - - // Compile instructions. 
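// The two word-rounding helpers the locals layout above leans on, written out
// as plain functions (a sketch; in the compiler they are declarative macros of
// the same names):
fn size_bytes_in_words(bytes: u64) -> u64 {
    (bytes + 7) / 8 // e.g. 13 bytes -> 2 words
}

fn size_bytes_round_up_to_word_alignment(bytes: u64) -> u64 {
    (bytes + 7) & !7 // e.g. 13 bytes -> 16 bytes
}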
- self.add_locals(function); - self.compile_fn_args(function); - let mut warnings = Vec::new(); - let mut errors = Vec::new(); - for block in function.block_iter(self.context) { - self.add_block_label(block); - for instr_val in block.instruction_iter(self.context) { - check!( - self.compile_instruction(&block, &instr_val), - return err(warnings, errors), - warnings, - errors - ); - } - } - ok((), warnings, errors) - } - - fn compile_instruction(&mut self, block: &Block, instr_val: &Value) -> CompileResult<()> { - let mut warnings = Vec::new(); - let mut errors = Vec::new(); - if let ValueDatum::Instruction(instruction) = &self.context.values[instr_val.0].value { - match instruction { - Instruction::AddrOf(arg) => self.compile_addr_of(instr_val, arg), - Instruction::AsmBlock(asm, args) => { - check!( - self.compile_asm_block(instr_val, asm, args), - return err(warnings, errors), - warnings, - errors - ) - } - Instruction::BitCast(val, ty) => self.compile_bitcast(instr_val, val, ty), - Instruction::BinaryOp { op, arg1, arg2 } => { - self.compile_binary_op(instr_val, op, arg1, arg2) - } - Instruction::Branch(to_block) => self.compile_branch(block, to_block), - Instruction::Call(..) => { - errors.push(CompileError::Internal( - "Calls are not yet supported.", - self.md_mgr - .val_to_span(self.context, *instr_val) - .unwrap_or_else(Self::empty_span), - )); - return err(warnings, errors); - } - Instruction::Cmp(pred, lhs_value, rhs_value) => { - self.compile_cmp(instr_val, pred, lhs_value, rhs_value) - } - Instruction::ConditionalBranch { - cond_value, - true_block, - false_block, - } => self.compile_conditional_branch(cond_value, block, true_block, false_block), - Instruction::ContractCall { - params, - coins, - asset_id, - gas, - .. - } => self.compile_contract_call(instr_val, params, coins, asset_id, gas), - Instruction::ExtractElement { - array, - ty, - index_val, - } => self.compile_extract_element(instr_val, array, ty, index_val), - Instruction::ExtractValue { - aggregate, indices, .. - } => self.compile_extract_value(instr_val, aggregate, indices), - Instruction::GetStorageKey => { - check!( - self.compile_get_storage_key(instr_val), - return err(warnings, errors), - warnings, - errors - ) - } - Instruction::GetPointer { - base_ptr, - ptr_ty, - offset, - } => self.compile_get_pointer(instr_val, base_ptr, ptr_ty, *offset), - Instruction::Gtf { index, tx_field_id } => { - self.compile_gtf(instr_val, index, *tx_field_id) - } - Instruction::InsertElement { - array, - ty, - value, - index_val, - } => self.compile_insert_element(instr_val, array, ty, value, index_val), - Instruction::InsertValue { - aggregate, - value, - indices, - .. - } => self.compile_insert_value(instr_val, aggregate, value, indices), - Instruction::IntToPtr(val, _) => self.compile_int_to_ptr(instr_val, val), - Instruction::Load(src_val) => check!( - self.compile_load(instr_val, src_val), - return err(warnings, errors), - warnings, - errors - ), - Instruction::Log { - log_val, - log_ty, - log_id, - } => self.compile_log(instr_val, log_val, log_ty, log_id), - Instruction::Nop => (), - Instruction::Phi(_) => (), // Managing the phi value is done in br and cbr compilation. 
- Instruction::ReadRegister(reg) => self.compile_read_register(instr_val, reg), - Instruction::Ret(ret_val, ty) => self.compile_ret(instr_val, ret_val, ty), - Instruction::StateLoadQuadWord { load_val, key } => check!( - self.compile_state_access_quad_word( - instr_val, - load_val, - key, - StateAccessType::Read - ), - return err(warnings, errors), - warnings, - errors - ), - Instruction::StateLoadWord(key) => check!( - self.compile_state_load_word(instr_val, key), - return err(warnings, errors), - warnings, - errors - ), - Instruction::StateStoreQuadWord { stored_val, key } => check!( - self.compile_state_access_quad_word( - instr_val, - stored_val, - key, - StateAccessType::Write - ), - return err(warnings, errors), - warnings, - errors - ), - Instruction::StateStoreWord { stored_val, key } => check!( - self.compile_state_store_word(instr_val, stored_val, key), - return err(warnings, errors), - warnings, - errors - ), - Instruction::Store { - dst_val, - stored_val, - } => check!( - self.compile_store(instr_val, dst_val, stored_val), - return err(warnings, errors), - warnings, - errors - ), - } - } else { - errors.push(CompileError::Internal( - "Value not an instruction.", - self.md_mgr - .val_to_span(self.context, *instr_val) - .unwrap_or_else(Self::empty_span), - )); - } - ok((), warnings, errors) - } - - // OK, I began by trying to translate the IR ASM block data structures back into AST data - // structures which I could feed to the code in asm_generation/expression/mod.rs where it - // compiles the inline ASM. But it's more work to do that than to just re-implement that - // algorithm with the IR data here. - - fn compile_asm_block( - &mut self, - instr_val: &Value, - asm: &AsmBlock, - asm_args: &[AsmArg], - ) -> CompileResult<()> { - let mut warnings: Vec = Vec::new(); - let mut errors: Vec = Vec::new(); - let mut inline_reg_map = HashMap::new(); - let mut inline_ops = Vec::new(); - for AsmArg { name, initializer } in asm_args { - assert_or_warn!( - ConstantRegister::parse_register_name(name.as_str()).is_none(), - warnings, - name.span().clone(), - Warning::ShadowingReservedRegister { - reg_name: name.clone() - } - ); - let arg_reg = initializer - .map(|init_val| self.value_to_register(&init_val)) - .unwrap_or_else(|| self.reg_seqr.next()); - inline_reg_map.insert(name.as_str(), arg_reg); - } - - let realize_register = |reg_name: &str| { - inline_reg_map.get(reg_name).cloned().or_else(|| { - ConstantRegister::parse_register_name(reg_name).map(&VirtualRegister::Constant) - }) - }; - - // For each opcode in the asm expression, attempt to parse it into an opcode and - // replace references to the above registers with the newly allocated ones. - let asm_block = &self.context.asm_blocks[asm.0]; - for op in &asm_block.body { - let replaced_registers = op - .args - .iter() - .map(|reg_name| -> Result<_, CompileError> { - realize_register(reg_name.as_str()).ok_or_else(|| { - CompileError::UnknownRegister { - span: reg_name.span(), - initialized_registers: inline_reg_map - .iter() - .map(|(name, _)| *name) - .collect::>() - .join("\n"), - } - }) - }) - .filter_map(|res| match res { - Err(e) => { - errors.push(e); - None - } - Ok(o) => Some(o), - }) - .collect::>(); - - // Parse the actual op and registers. 
- let op_span = self - .md_mgr - .md_to_span(self.context, op.metadata) - .unwrap_or_else(Self::empty_span); - let opcode = check!( - Op::parse_opcode( - &op.name, - &replaced_registers, - &op.immediate, - op_span.clone(), - ), - return err(warnings, errors), - warnings, - errors - ); - - inline_ops.push(Op { - opcode: either::Either::Left(opcode), - comment: "asm block".into(), - owning_span: Some(op_span), - }); - } - - // Now, load the designated asm return register into the desired return register, but only - // if it was named. - if let Some(ret_reg_name) = &asm_block.return_name { - // Lookup and replace the return register. - let ret_reg = match realize_register(ret_reg_name.as_str()) { - Some(reg) => reg, - None => { - errors.push(CompileError::UnknownRegister { - initialized_registers: inline_reg_map - .iter() - .map(|(name, _)| name.to_string()) - .collect::>() - .join("\n"), - span: ret_reg_name.span(), - }); - return err(warnings, errors); - } - }; - let instr_reg = self.reg_seqr.next(); - inline_ops.push(Op { - opcode: Either::Left(VirtualOp::MOVE(instr_reg.clone(), ret_reg)), - comment: "return value from inline asm".into(), - owning_span: self.md_mgr.val_to_span(self.context, *instr_val), - }); - self.reg_map.insert(*instr_val, instr_reg); - } - - self.bytecode.append(&mut inline_ops); - - ok((), warnings, errors) - } - - fn compile_addr_of(&mut self, instr_val: &Value, arg: &Value) { - let reg = self.value_to_register(arg); - self.reg_map.insert(*instr_val, reg); - } - - fn compile_bitcast(&mut self, instr_val: &Value, bitcast_val: &Value, to_type: &Type) { - let val_reg = self.value_to_register(bitcast_val); - let reg = if let Type::Bool = to_type { - // This may not be necessary if we just treat a non-zero value as 'true'. - let res_reg = self.reg_seqr.next(); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::EQ( - res_reg.clone(), - val_reg, - VirtualRegister::Constant(ConstantRegister::Zero), - )), - comment: "convert to inversed boolean".into(), - owning_span: self.md_mgr.val_to_span(self.context, *instr_val), - }); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::XORI( - res_reg.clone(), - res_reg.clone(), - VirtualImmediate12 { value: 1 }, - )), - comment: "invert boolean".into(), - owning_span: self.md_mgr.val_to_span(self.context, *instr_val), - }); - res_reg - } else { - // This is a no-op, although strictly speaking Unit should probably be compiled as - // a zero. 
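// What the EQ + XORI pair in compile_bitcast above computes, traced as plain
// integer arithmetic: the cast to bool tests the value against zero and then
// inverts the result, so any non-zero input becomes 1 and zero stays 0.
fn bitcast_to_bool(val: u64) -> u64 {
    let eq_zero = (val == 0) as u64; // EQ   res, val, $zero
    eq_zero ^ 1                      // XORI res, res, 1
}
// bitcast_to_bool(0) == 0; bitcast_to_bool(42) == 1.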
- val_reg - }; - self.reg_map.insert(*instr_val, reg); - } - - fn compile_binary_op( - &mut self, - instr_val: &Value, - op: &BinaryOpKind, - arg1: &Value, - arg2: &Value, - ) { - let val1_reg = self.value_to_register(arg1); - let val2_reg = self.value_to_register(arg2); - let res_reg = self.reg_seqr.next(); - let opcode = match op { - BinaryOpKind::Add => Either::Left(VirtualOp::ADD(res_reg.clone(), val1_reg, val2_reg)), - BinaryOpKind::Sub => Either::Left(VirtualOp::SUB(res_reg.clone(), val1_reg, val2_reg)), - BinaryOpKind::Mul => Either::Left(VirtualOp::MUL(res_reg.clone(), val1_reg, val2_reg)), - BinaryOpKind::Div => Either::Left(VirtualOp::DIV(res_reg.clone(), val1_reg, val2_reg)), - }; - self.bytecode.push(Op { - opcode, - comment: String::new(), - owning_span: self.md_mgr.val_to_span(self.context, *instr_val), - }); - - self.reg_map.insert(*instr_val, res_reg); - } - - fn compile_branch(&mut self, from_block: &Block, to_block: &Block) { - self.compile_branch_to_phi_value(from_block, to_block); - - let label = self.block_to_label(to_block); - self.bytecode.push(Op::jump_to_label(label)); - } - - fn compile_cmp( - &mut self, - instr_val: &Value, - pred: &Predicate, - lhs_value: &Value, - rhs_value: &Value, - ) { - let lhs_reg = self.value_to_register(lhs_value); - let rhs_reg = self.value_to_register(rhs_value); - let res_reg = self.reg_seqr.next(); - match pred { - Predicate::Equal => { - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::EQ(res_reg.clone(), lhs_reg, rhs_reg)), - comment: String::new(), - owning_span: self.md_mgr.val_to_span(self.context, *instr_val), - }); - } - } - self.reg_map.insert(*instr_val, res_reg); - } - - fn compile_conditional_branch( - &mut self, - cond_value: &Value, - from_block: &Block, - true_block: &Block, - false_block: &Block, - ) { - self.compile_branch_to_phi_value(from_block, true_block); - self.compile_branch_to_phi_value(from_block, false_block); - - let cond_reg = self.value_to_register(cond_value); - - let true_label = self.block_to_label(true_block); - self.bytecode - .push(Op::jump_if_not_zero(cond_reg, true_label)); - - let false_label = self.block_to_label(false_block); - self.bytecode.push(Op::jump_to_label(false_label)); - } - - fn compile_branch_to_phi_value(&mut self, from_block: &Block, to_block: &Block) { - if let Some(local_val) = to_block.get_phi_val_coming_from(self.context, from_block) { - // We only need a MOVE here if get_phi_val_coming_from() is actually assigned to a - // register - if let Some(local_reg) = self.value_to_register_or_none(&local_val) { - let phi_reg = self.value_to_register(&to_block.get_phi(self.context)); - self.bytecode.push(Op::unowned_register_move_comment( - phi_reg, - local_reg, - "branch to phi value", - )); - } - } - } - - #[allow(clippy::too_many_arguments)] - fn compile_contract_call( - &mut self, - instr_val: &Value, - params: &Value, - coins: &Value, - asset_id: &Value, - gas: &Value, - ) { - let ra_pointer = self.value_to_register(params); - let coins_register = self.value_to_register(coins); - let asset_id_register = self.value_to_register(asset_id); - let gas_register = self.value_to_register(gas); - - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::CALL( - ra_pointer, - coins_register, - asset_id_register, - gas_register, - )), - comment: "call external contract".into(), - owning_span: self.md_mgr.val_to_span(self.context, *instr_val), - }); - - // now, move the return value of the contract call to the return register. 
- // TODO validate RETL matches the expected type (this is a comment from the old codegen) - let instr_reg = self.reg_seqr.next(); - self.bytecode.push(Op::unowned_register_move( - instr_reg.clone(), - VirtualRegister::Constant(ConstantRegister::ReturnValue), - )); - self.reg_map.insert(*instr_val, instr_reg); - } - - fn compile_extract_element( - &mut self, - instr_val: &Value, - array: &Value, - ty: &Aggregate, - index_val: &Value, - ) { - // Base register should pointer to some stack allocated memory. - let base_reg = self.value_to_register(array); - - // Index value is the array element index, not byte nor word offset. - let index_reg = self.value_to_register(index_val); - let rel_offset_reg = match index_reg { - VirtualRegister::Virtual(_) => { - // We can reuse the register. - index_reg.clone() - } - VirtualRegister::Constant(_) => { - // We have a constant register, cannot reuse it. - self.reg_seqr.next() - } - }; - - // We could put the OOB check here, though I'm now thinking it would be too wasteful. - // See compile_bounds_assertion() in expression/array.rs (or look in Git history). - - let instr_reg = self.reg_seqr.next(); - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - let elem_type = ty.get_elem_type(self.context).unwrap(); - let elem_size = ir_type_size_in_bytes(self.context, &elem_type); - if elem_type.is_copy_type() { - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::MULI( - rel_offset_reg.clone(), - index_reg, - VirtualImmediate12 { value: 8 }, - )), - comment: "extract_element relative offset".into(), - owning_span: owning_span.clone(), - }); - let elem_offs_reg = self.reg_seqr.next(); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::ADD( - elem_offs_reg.clone(), - base_reg, - rel_offset_reg, - )), - comment: "extract_element absolute offset".into(), - owning_span: owning_span.clone(), - }); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::LW( - instr_reg.clone(), - elem_offs_reg, - VirtualImmediate12 { value: 0 }, - )), - comment: "extract_element".into(), - owning_span, - }); - } else { - // Value too big for a register, so we return the memory offset. - if elem_size > compiler_constants::TWELVE_BITS { - let size_data_id = self - .data_section - .insert_data_value(&Literal::U64(elem_size)); - let size_reg = self.reg_seqr.next(); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::LWDataId(size_reg.clone(), size_data_id)), - owning_span: owning_span.clone(), - comment: "loading element size for relative offset".into(), - }); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::MUL(instr_reg.clone(), index_reg, size_reg)), - comment: "extract_element relative offset".into(), - owning_span: owning_span.clone(), - }); - } else { - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::MULI( - instr_reg.clone(), - index_reg, - VirtualImmediate12 { - value: elem_size as u16, - }, - )), - comment: "extract_element relative offset".into(), - owning_span: owning_span.clone(), - }); - } - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::ADD( - instr_reg.clone(), - base_reg, - instr_reg.clone(), - )), - comment: "extract_element absolute offset".into(), - owning_span, - }); - } - - self.reg_map.insert(*instr_val, instr_reg); - } - - fn compile_extract_value(&mut self, instr_val: &Value, aggregate_val: &Value, indices: &[u64]) { - // Base register should pointer to some stack allocated memory. 
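// Element addressing as implemented by compile_extract_element above, reduced
// to arithmetic: copy-type elements sit at `base + index * 8` and are read with
// LW, while larger elements yield `base + index * elem_size` as an address.
fn element_offset(base: u64, index: u64, elem_size_in_bytes: u64, is_copy_type: bool) -> u64 {
    if is_copy_type {
        base + index * 8 // MULI index, 8; ADD base; then LW
    } else {
        base + index * elem_size_in_bytes // MULI index, size; ADD base
    }
}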
- let base_reg = self.value_to_register(aggregate_val); - let ((extract_offset, _), field_type) = aggregate_idcs_to_field_layout( - self.context, - &aggregate_val.get_stripped_ptr_type(self.context).unwrap(), - indices, - ); - - let instr_reg = self.reg_seqr.next(); - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - if field_type.is_copy_type() { - if extract_offset > compiler_constants::TWELVE_BITS { - let offset_reg = self.reg_seqr.next(); - self.number_to_reg(extract_offset, &offset_reg, owning_span.clone()); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::ADD( - offset_reg.clone(), - base_reg.clone(), - base_reg, - )), - comment: "add array base to offset".into(), - owning_span: owning_span.clone(), - }); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::LW( - instr_reg.clone(), - offset_reg, - VirtualImmediate12 { value: 0 }, - )), - comment: format!( - "extract_value @ {}", - indices - .iter() - .map(|idx| format!("{}", idx)) - .collect::>() - .join(",") - ), - owning_span, - }); - } else { - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::LW( - instr_reg.clone(), - base_reg, - VirtualImmediate12 { - value: extract_offset as u16, - }, - )), - comment: format!( - "extract_value @ {}", - indices - .iter() - .map(|idx| format!("{}", idx)) - .collect::>() - .join(",") - ), - owning_span, - }); - } - } else { - // Value too big for a register, so we return the memory offset. - if extract_offset * 8 > compiler_constants::TWELVE_BITS { - let offset_reg = self.reg_seqr.next(); - self.number_to_reg(extract_offset * 8, &offset_reg, owning_span.clone()); - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADD( - instr_reg.clone(), - base_reg, - offset_reg, - )), - comment: "extract address".into(), - owning_span, - }); - } else { - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - instr_reg.clone(), - base_reg, - VirtualImmediate12 { - value: (extract_offset * 8) as u16, - }, - )), - comment: "extract address".into(), - owning_span, - }); - } - } - - self.reg_map.insert(*instr_val, instr_reg); - } - - fn compile_get_storage_key(&mut self, instr_val: &Value) -> CompileResult<()> { - let warnings: Vec = Vec::new(); - let mut errors: Vec = Vec::new(); - - let state_idx = self.md_mgr.val_to_storage_key(self.context, *instr_val); - let instr_span = self.md_mgr.val_to_span(self.context, *instr_val); - - let storage_slot_to_hash = match state_idx { - Some(state_idx) => { - format!( - "{}{}", - sway_utils::constants::STORAGE_DOMAIN_SEPARATOR, - state_idx - ) - } - None => { - errors.push(CompileError::Internal( - "State index for __get_storage_key is not available as a metadata", - instr_span.unwrap_or_else(Self::empty_span), - )); - return err(warnings, errors); - } - }; - - let hashed_storage_slot = Hasher::hash(storage_slot_to_hash); - - let data_id = self - .data_section - .insert_data_value(&Literal::B256(hashed_storage_slot.into())); - - // Allocate a register for it, and a load instruction. - let reg = self.reg_seqr.next(); - - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::LWDataId(reg.clone(), data_id)), - comment: "literal instantiation".into(), - owning_span: instr_span, - }); - self.reg_map.insert(*instr_val, reg); - ok((), warnings, errors) - } - - fn compile_get_pointer( - &mut self, - instr_val: &Value, - base_ptr: &Pointer, - ptr_ty: &Pointer, - offset: u64, - ) { - // `get_ptr` is like a `load` except the value isn't dereferenced. 
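// The byte offset `get_ptr` resolves to, per the match below: the local's word
// offset is scaled to bytes, and the instruction's element offset is scaled by
// the pointee size.
fn get_ptr_byte_offset(word_offs: u64, pointee_size_in_bytes: u64, elem_offset: u64) -> u64 {
    word_offs * 8 + pointee_size_in_bytes * elem_offset
}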
- let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - match self.ptr_map.get(base_ptr) { - None => unimplemented!("BUG? Uninitialised pointer."), - Some(storage) => match storage.clone() { - Storage::Data(_data_id) => { - // Not sure if we'll ever need this. - unimplemented!("TODO get_ptr() into the data section."); - } - Storage::Stack(word_offs) => { - let ptr_ty_size_in_bytes = - ir_type_size_in_bytes(self.context, ptr_ty.get_type(self.context)); - - let offset_in_bytes = word_offs * 8 + ptr_ty_size_in_bytes * offset; - let instr_reg = self.reg_seqr.next(); - if offset_in_bytes > compiler_constants::TWELVE_BITS { - self.number_to_reg(offset_in_bytes, &instr_reg, owning_span.clone()); - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADD( - instr_reg.clone(), - self.stack_base_reg.as_ref().unwrap().clone(), - instr_reg.clone(), - )), - comment: "get offset reg for get_ptr".into(), - owning_span, - }); - } else { - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - instr_reg.clone(), - self.stack_base_reg.as_ref().unwrap().clone(), - VirtualImmediate12 { - value: (offset_in_bytes) as u16, - }, - )), - comment: "get offset reg for get_ptr".into(), - owning_span, - }); - } - self.reg_map.insert(*instr_val, instr_reg); - } - }, - } - } - - fn compile_gtf(&mut self, instr_val: &Value, index: &Value, tx_field_id: u64) { - let instr_reg = self.reg_seqr.next(); - let index_reg = self.value_to_register(index); - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::GTF( - instr_reg.clone(), - index_reg, - VirtualImmediate12 { - value: tx_field_id as u16, - }, - )), - comment: "get transaction field".into(), - owning_span: self.md_mgr.val_to_span(self.context, *instr_val), - }); - self.reg_map.insert(*instr_val, instr_reg); - } - - fn compile_insert_element( - &mut self, - instr_val: &Value, - array: &Value, - ty: &Aggregate, - value: &Value, - index_val: &Value, - ) { - // Base register should point to some stack allocated memory. - let base_reg = self.value_to_register(array); - let insert_reg = self.value_to_register(value); - - // Index value is the array element index, not byte nor word offset. - let index_reg = self.value_to_register(index_val); - let rel_offset_reg = match index_reg { - VirtualRegister::Virtual(_) => { - // We can reuse the register. - index_reg.clone() - } - VirtualRegister::Constant(_) => { - // We have a constant register, cannot reuse it. - self.reg_seqr.next() - } - }; - - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - - let elem_type = ty.get_elem_type(self.context).unwrap(); - let elem_size = ir_type_size_in_bytes(self.context, &elem_type); - if elem_type.is_copy_type() { - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::MULI( - rel_offset_reg.clone(), - index_reg, - VirtualImmediate12 { value: 8 }, - )), - comment: "insert_element relative offset".into(), - owning_span: owning_span.clone(), - }); - let elem_offs_reg = self.reg_seqr.next(); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::ADD( - elem_offs_reg.clone(), - base_reg.clone(), - rel_offset_reg, - )), - comment: "insert_element absolute offset".into(), - owning_span: owning_span.clone(), - }); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::SW( - elem_offs_reg, - insert_reg, - VirtualImmediate12 { value: 0 }, - )), - comment: "insert_element".into(), - owning_span, - }); - } else { - // Element size is larger than 8; we switch to bytewise offsets and sizes and use MCP. 
- if elem_size > compiler_constants::TWELVE_BITS { - todo!("array element size bigger than 4k") - } else { - let elem_index_offs_reg = self.reg_seqr.next(); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::MULI( - elem_index_offs_reg.clone(), - index_reg, - VirtualImmediate12 { - value: elem_size as u16, - }, - )), - comment: "insert_element relative offset".into(), - owning_span: owning_span.clone(), - }); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::ADD( - elem_index_offs_reg.clone(), - base_reg.clone(), - elem_index_offs_reg.clone(), - )), - comment: "insert_element absolute offset".into(), - owning_span: owning_span.clone(), - }); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::MCPI( - elem_index_offs_reg, - insert_reg, - VirtualImmediate12 { - value: elem_size as u16, - }, - )), - comment: "insert_element store value".into(), - owning_span, - }); - } - } - - // We set the 'instruction' register to the base register, so that cascading inserts will - // work. - self.reg_map.insert(*instr_val, base_reg); - } - - fn compile_insert_value( - &mut self, - instr_val: &Value, - aggregate_val: &Value, - value: &Value, - indices: &[u64], - ) { - // Base register should point to some stack allocated memory. - let base_reg = self.value_to_register(aggregate_val); - - let insert_reg = self.value_to_register(value); - let ((mut insert_offs, field_size_in_bytes), field_type) = aggregate_idcs_to_field_layout( - self.context, - &aggregate_val.get_stripped_ptr_type(self.context).unwrap(), - indices, - ); - - let value_type = value.get_stripped_ptr_type(self.context).unwrap(); - let value_size_in_bytes = ir_type_size_in_bytes(self.context, &value_type); - let value_size_in_words = size_bytes_in_words!(value_size_in_bytes); - - // Account for the padding if the final field type is a union and the value we're trying to - // insert is smaller than the size of the union (i.e. we're inserting a small variant). 
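// The union padding adjustment described above, in isolation: a small variant
// is right-aligned within its union field, so the write offset is bumped by the
// word-size difference (matching the `insert_offs +=` just below).
fn padded_insert_offset(insert_offs: u64, field_size_in_words: u64, value_size_in_words: u64) -> u64 {
    assert!(field_size_in_words >= value_size_in_words);
    insert_offs + (field_size_in_words - value_size_in_words)
}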
- if matches!(field_type, Type::Union(_)) { - let field_size_in_words = size_bytes_in_words!(field_size_in_bytes); - assert!(field_size_in_words >= value_size_in_words); - insert_offs += field_size_in_words - value_size_in_words; - } - - let indices_str = indices - .iter() - .map(|idx| format!("{}", idx)) - .collect::>() - .join(","); - - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - - if value_type.is_copy_type() { - if insert_offs > compiler_constants::TWELVE_BITS { - let insert_offs_reg = self.reg_seqr.next(); - self.number_to_reg(insert_offs, &insert_offs_reg, owning_span.clone()); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::ADD( - base_reg.clone(), - base_reg.clone(), - insert_offs_reg, - )), - comment: "insert_value absolute offset".into(), - owning_span: owning_span.clone(), - }); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::SW( - base_reg.clone(), - insert_reg, - VirtualImmediate12 { value: 0 }, - )), - comment: format!("insert_value @ {}", indices_str), - owning_span, - }); - } else { - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::SW( - base_reg.clone(), - insert_reg, - VirtualImmediate12 { - value: insert_offs as u16, - }, - )), - comment: format!("insert_value @ {}", indices_str), - owning_span, - }); - } - } else { - let offs_reg = self.reg_seqr.next(); - if insert_offs * 8 > compiler_constants::TWELVE_BITS { - self.number_to_reg(insert_offs * 8, &offs_reg, owning_span.clone()); - } else { - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - offs_reg.clone(), - base_reg.clone(), - VirtualImmediate12 { - value: (insert_offs * 8) as u16, - }, - )), - comment: format!("get struct field(s) {} offset", indices_str), - owning_span: owning_span.clone(), - }); - } - if value_size_in_bytes > compiler_constants::TWELVE_BITS { - let size_reg = self.reg_seqr.next(); - self.number_to_reg(value_size_in_bytes, &size_reg, owning_span.clone()); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::MCP(offs_reg, insert_reg, size_reg)), - comment: "store struct field value".into(), - owning_span, - }); - } else { - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::MCPI( - offs_reg, - insert_reg, - VirtualImmediate12 { - value: value_size_in_bytes as u16, - }, - )), - comment: "store struct field value".into(), - owning_span, - }); - } - } - - // We set the 'instruction' register to the base register, so that cascading inserts will - // work. - self.reg_map.insert(*instr_val, base_reg); - } - - fn compile_int_to_ptr(&mut self, instr_val: &Value, int_to_ptr_val: &Value) { - let val_reg = self.value_to_register(int_to_ptr_val); - self.reg_map.insert(*instr_val, val_reg); - } - - fn compile_load(&mut self, instr_val: &Value, src_val: &Value) -> CompileResult<()> { - let ptr = self.resolve_ptr(src_val); - if ptr.value.is_none() { - return ptr.map(|_| ()); - } - let (ptr, _ptr_ty, _offset) = ptr.value.unwrap(); - let instr_reg = self.reg_seqr.next(); - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - match self.ptr_map.get(&ptr) { - None => unimplemented!("BUG? 
Uninitialised pointer."), - Some(storage) => match storage.clone() { - Storage::Data(data_id) => { - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::LWDataId(instr_reg.clone(), data_id)), - comment: "load constant".into(), - owning_span, - }); - } - Storage::Stack(word_offs) => { - let base_reg = self.stack_base_reg.as_ref().unwrap().clone(); - if ptr.get_type(self.context).is_copy_type() { - // Value can fit in a register, so we load the value. - if word_offs > compiler_constants::TWELVE_BITS { - let offs_reg = self.reg_seqr.next(); - self.number_to_reg( - word_offs * 8, // Base reg for LW is in bytes - &offs_reg, - owning_span.clone(), - ); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::ADD( - offs_reg.clone(), - base_reg, - offs_reg.clone(), - )), - comment: "absolute offset for load".into(), - owning_span: owning_span.clone(), - }); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::LW( - instr_reg.clone(), - offs_reg.clone(), - VirtualImmediate12 { value: 0 }, - )), - comment: "load value".into(), - owning_span, - }); - } else { - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::LW( - instr_reg.clone(), - base_reg, - VirtualImmediate12 { - value: word_offs as u16, - }, - )), - comment: "load value".into(), - owning_span, - }); - } - } else { - // Value too big for a register, so we return the memory offset. This is - // what LW to the data section does, via LWDataId. - let word_offs = word_offs * 8; - if word_offs > compiler_constants::TWELVE_BITS { - let offs_reg = self.reg_seqr.next(); - self.number_to_reg(word_offs, &offs_reg, owning_span.clone()); - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADD( - instr_reg.clone(), - base_reg, - offs_reg, - )), - comment: "load address".into(), - owning_span, - }); - } else { - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - instr_reg.clone(), - base_reg, - VirtualImmediate12 { - value: word_offs as u16, - }, - )), - comment: "load address".into(), - owning_span, - }); - } - } - } - }, - } - self.reg_map.insert(*instr_val, instr_reg); - ok((), Vec::new(), Vec::new()) - } - - fn compile_log(&mut self, instr_val: &Value, log_val: &Value, log_ty: &Type, log_id: &Value) { - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - let log_val_reg = self.value_to_register(log_val); - let log_id_reg = self.value_to_register(log_id); - - if log_ty.is_copy_type() { - self.bytecode.push(Op { - owning_span, - opcode: Either::Left(VirtualOp::LOG( - log_val_reg, - log_id_reg, - VirtualRegister::Constant(ConstantRegister::Zero), - VirtualRegister::Constant(ConstantRegister::Zero), - )), - comment: "".into(), - }); - } else { - // If the type not a reference type then we use LOGD to log the data. First put the - // size into the data section, then add a LW to get it, then add a LOGD which uses - // it. 
- let size_reg = self.reg_seqr.next(); - let size_in_bytes = ir_type_size_in_bytes(self.context, log_ty); - let size_data_id = self - .data_section - .insert_data_value(&Literal::U64(size_in_bytes)); - - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::LWDataId(size_reg.clone(), size_data_id)), - owning_span: owning_span.clone(), - comment: "loading size for LOGD".into(), - }); - self.bytecode.push(Op { - owning_span, - opcode: Either::Left(VirtualOp::LOGD( - VirtualRegister::Constant(ConstantRegister::Zero), - log_id_reg, - log_val_reg, - size_reg, - )), - comment: "".into(), - }); - } - } - - fn compile_read_register(&mut self, instr_val: &Value, reg: &sway_ir::Register) { - let instr_reg = self.reg_seqr.next(); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::MOVE( - instr_reg.clone(), - VirtualRegister::Constant(match reg { - sway_ir::Register::Of => ConstantRegister::Overflow, - sway_ir::Register::Pc => ConstantRegister::ProgramCounter, - sway_ir::Register::Ssp => ConstantRegister::StackStartPointer, - sway_ir::Register::Sp => ConstantRegister::StackPointer, - sway_ir::Register::Fp => ConstantRegister::FramePointer, - sway_ir::Register::Hp => ConstantRegister::HeapPointer, - sway_ir::Register::Error => ConstantRegister::Error, - sway_ir::Register::Ggas => ConstantRegister::GlobalGas, - sway_ir::Register::Cgas => ConstantRegister::ContextGas, - sway_ir::Register::Bal => ConstantRegister::Balance, - sway_ir::Register::Is => ConstantRegister::InstructionStart, - sway_ir::Register::Ret => ConstantRegister::ReturnValue, - sway_ir::Register::Retl => ConstantRegister::ReturnLength, - sway_ir::Register::Flag => ConstantRegister::Flags, - }), - )), - comment: "move register into abi function".to_owned(), - owning_span: self.md_mgr.val_to_span(self.context, *instr_val), - }); - - self.reg_map.insert(*instr_val, instr_reg); - } - - fn compile_ret(&mut self, instr_val: &Value, ret_val: &Value, ret_type: &Type) { - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - if ret_type.eq(self.context, &Type::Unit) { - // Unit returns should always be zero, although because they can be omitted from - // functions, the register is sometimes uninitialized. Manually return zero in this - // case. - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::RET(VirtualRegister::Constant( - ConstantRegister::Zero, - ))), - owning_span, - comment: "returning unit as zero".into(), - }); - } else { - let ret_reg = self.value_to_register(ret_val); - - if ret_type.is_copy_type() { - self.bytecode.push(Op { - owning_span, - opcode: Either::Left(VirtualOp::RET(ret_reg)), - comment: "".into(), - }); - } else { - // If the type not a reference type then we use RETD to return data. First put the - // size into the data section, then add a LW to get it, then add a RETD which uses - // it. 
- let size_reg = self.reg_seqr.next(); - let size_in_bytes = ir_type_size_in_bytes(self.context, ret_type); - let size_data_id = self - .data_section - .insert_data_value(&Literal::U64(size_in_bytes)); - - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::LWDataId(size_reg.clone(), size_data_id)), - owning_span: owning_span.clone(), - comment: "loading size for RETD".into(), - }); - self.bytecode.push(Op { - owning_span, - opcode: Either::Left(VirtualOp::RETD(ret_reg, size_reg)), - comment: "".into(), - }); - } - } - } - - fn offset_reg( - &mut self, - base_reg: &VirtualRegister, - offset_in_bytes: u64, - span: Option, - ) -> VirtualRegister { - let offset_reg = self.reg_seqr.next(); - if offset_in_bytes > compiler_constants::TWELVE_BITS { - let offs_reg = self.reg_seqr.next(); - self.number_to_reg(offset_in_bytes, &offs_reg, span.clone()); - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADD( - offset_reg.clone(), - base_reg.clone(), - offs_reg, - )), - comment: "get offset".into(), - owning_span: span, - }); - } else { - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - offset_reg.clone(), - base_reg.clone(), - VirtualImmediate12 { - value: offset_in_bytes as u16, - }, - )), - comment: "get offset".into(), - owning_span: span, - }); - } - - offset_reg - } - - fn compile_state_access_quad_word( - &mut self, - instr_val: &Value, - val: &Value, - key: &Value, - access_type: StateAccessType, - ) -> CompileResult<()> { - // Make sure that both val and key are pointers to B256. - assert!(matches!( - val.get_stripped_ptr_type(self.context), - Some(Type::B256) - )); - assert!(matches!( - key.get_stripped_ptr_type(self.context), - Some(Type::B256) - )); - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - - let key_ptr = self.resolve_ptr(key); - if key_ptr.value.is_none() { - return key_ptr.map(|_| ()); - } - let (key_ptr, ptr_ty, offset) = key_ptr.value.unwrap(); - - // Not expecting an offset here nor a pointer cast - assert!(offset == 0); - assert!(ptr_ty.get_type(self.context).eq(self.context, &Type::B256)); - - let val_reg = if matches!( - &self.context.values[val.0].value, - ValueDatum::Instruction(Instruction::IntToPtr(..)) - ) { - match self.reg_map.get(val) { - Some(vreg) => vreg.clone(), - None => unreachable!("int_to_ptr instruction doesn't have vreg mapped"), - } - } else { - // Expect ptr_ty here to also be b256 and offset to be whatever... 
- let val_ptr = self.resolve_ptr(val); - if val_ptr.value.is_none() { - return val_ptr.map(|_| ()); - } - let (val_ptr, ptr_ty, offset) = val_ptr.value.unwrap(); - // Expect the ptr_ty for val to also be B256 - assert!(ptr_ty.get_type(self.context).eq(self.context, &Type::B256)); - match self.ptr_map.get(&val_ptr) { - Some(Storage::Stack(val_offset)) => { - let base_reg = self.stack_base_reg.as_ref().unwrap().clone(); - let val_offset_in_bytes = val_offset * 8 + offset * 32; - self.offset_reg(&base_reg, val_offset_in_bytes, owning_span.clone()) - } - _ => unreachable!("Unexpected storage locations for key and val"), - } - }; - - let key_reg = match self.ptr_map.get(&key_ptr) { - Some(Storage::Stack(key_offset)) => { - let base_reg = self.stack_base_reg.as_ref().unwrap().clone(); - let key_offset_in_bytes = key_offset * 8; - self.offset_reg(&base_reg, key_offset_in_bytes, owning_span.clone()) - } - _ => unreachable!("Unexpected storage locations for key and val"), - }; - - self.bytecode.push(Op { - opcode: Either::Left(match access_type { - StateAccessType::Read => VirtualOp::SRWQ(val_reg, key_reg), - StateAccessType::Write => VirtualOp::SWWQ(key_reg, val_reg), - }), - comment: "quad word state access".into(), - owning_span, - }); - ok((), Vec::new(), Vec::new()) - } - - fn compile_state_load_word(&mut self, instr_val: &Value, key: &Value) -> CompileResult<()> { - // Make sure that the key is a pointers to B256. - assert!(matches!( - key.get_stripped_ptr_type(self.context), - Some(Type::B256) - )); - - let key_ptr = self.resolve_ptr(key); - if key_ptr.value.is_none() { - return key_ptr.map(|_| ()); - } - let (key_ptr, ptr_ty, offset) = key_ptr.value.unwrap(); - - // Not expecting an offset here nor a pointer cast - assert!(offset == 0); - assert!(ptr_ty.get_type(self.context).eq(self.context, &Type::B256)); - - let load_reg = self.reg_seqr.next(); - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - match self.ptr_map.get(&key_ptr) { - Some(Storage::Stack(key_offset)) => { - let base_reg = self.stack_base_reg.as_ref().unwrap().clone(); - let key_offset_in_bytes = key_offset * 8; - - let key_reg = self.offset_reg(&base_reg, key_offset_in_bytes, owning_span.clone()); - - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::SRW(load_reg.clone(), key_reg)), - comment: "single word state access".into(), - owning_span, - }); - } - _ => unreachable!("Unexpected storage location for key"), - } - - self.reg_map.insert(*instr_val, load_reg); - ok((), Vec::new(), Vec::new()) - } - - fn compile_state_store_word( - &mut self, - instr_val: &Value, - store_val: &Value, - key: &Value, - ) -> CompileResult<()> { - // Make sure that key is a pointer to B256. - assert!(matches!( - key.get_stripped_ptr_type(self.context), - Some(Type::B256) - )); - - // Make sure that store_val is a U64 value. - assert!(matches!( - store_val.get_type(self.context), - Some(Type::Uint(64)) - )); - let store_reg = self.value_to_register(store_val); - - // Expect the get_ptr here to have type b256 and offset = 0??? 
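// Key addressing shared by the state ops above: local B256 keys are tracked as
// word offsets from the stack base, while SRW/SWW/SRWQ/SWWQ need a byte
// address, hence the scaling by 8 before the base register is added in
// `offset_reg`.
fn key_byte_offset(key_word_offset: u64) -> u64 {
    key_word_offset * 8
}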
- let key_ptr = self.resolve_ptr(key); - if key_ptr.value.is_none() { - return key_ptr.map(|_| ()); - } - let (key_ptr, ptr_ty, offset) = key_ptr.value.unwrap(); - - // Not expecting an offset here nor a pointer cast - assert!(offset == 0); - assert!(ptr_ty.get_type(self.context).eq(self.context, &Type::B256)); - - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - match self.ptr_map.get(&key_ptr) { - Some(Storage::Stack(key_offset)) => { - let base_reg = self.stack_base_reg.as_ref().unwrap().clone(); - let key_offset_in_bytes = key_offset * 8; - - let key_reg = self.offset_reg(&base_reg, key_offset_in_bytes, owning_span.clone()); - - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::SWW(key_reg, store_reg)), - comment: "single word state access".into(), - owning_span, - }); - } - _ => unreachable!("Unexpected storage locations for key and store_val"), - } - - ok((), Vec::new(), Vec::new()) - } - - fn compile_store( - &mut self, - instr_val: &Value, - dst_val: &Value, - stored_val: &Value, - ) -> CompileResult<()> { - let ptr = self.resolve_ptr(dst_val); - if ptr.value.is_none() { - return ptr.map(|_| ()); - } - let (ptr, _ptr_ty, _offset) = ptr.value.unwrap(); - let stored_reg = self.value_to_register(stored_val); - let is_aggregate_ptr = ptr.is_aggregate_ptr(self.context); - let owning_span = self.md_mgr.val_to_span(self.context, *instr_val); - match self.ptr_map.get(&ptr) { - None => unreachable!("Bug! Trying to store to an unknown pointer."), - Some(storage) => match storage { - Storage::Data(_) => unreachable!("BUG! Trying to store to the data section."), - Storage::Stack(word_offs) => { - let word_offs = *word_offs; - let store_type = ptr.get_type(self.context); - let store_size_in_words = - size_bytes_in_words!(ir_type_size_in_bytes(self.context, store_type)); - if store_type.is_copy_type() { - let base_reg = self.stack_base_reg.as_ref().unwrap().clone(); - - // A single word can be stored with SW. - let stored_reg = if !is_aggregate_ptr { - // stored_reg is a value. - stored_reg - } else { - // stored_reg is a pointer, even though size is 1. We need to load it. - let tmp_reg = self.reg_seqr.next(); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::LW( - tmp_reg.clone(), - stored_reg, - VirtualImmediate12 { value: 0 }, - )), - comment: "load for store".into(), - owning_span: owning_span.clone(), - }); - tmp_reg - }; - if word_offs > compiler_constants::TWELVE_BITS { - let offs_reg = self.reg_seqr.next(); - self.number_to_reg( - word_offs * 8, // Base reg for SW is in bytes - &offs_reg, - owning_span.clone(), - ); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::ADD( - offs_reg.clone(), - base_reg, - offs_reg.clone(), - )), - comment: "store absolute offset".into(), - owning_span: owning_span.clone(), - }); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::SW( - offs_reg, - stored_reg, - VirtualImmediate12 { value: 0 }, - )), - comment: "store value".into(), - owning_span, - }); - } else { - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::SW( - base_reg, - stored_reg, - VirtualImmediate12 { - value: word_offs as u16, - }, - )), - comment: "store value".into(), - owning_span, - }); - } - } else { - let base_reg = self.stack_base_reg.as_ref().unwrap().clone(); - - // Bigger than 1 word needs a MCPI. XXX Or MCP if it's huge. 
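// The copy-width decision the comment above alludes to, as used in the code
// below: MCPI carries its length in a 12-bit immediate, so once the store
// exceeds 4095 bytes the length is loaded into a register and MCP is used
// instead.
fn bulk_store_uses_mcp(store_size_in_words: u64) -> bool {
    store_size_in_words * 8 > 0b1111_1111_1111 // compiler_constants::TWELVE_BITS
}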
- let dest_offs_reg = self.reg_seqr.next(); - if word_offs * 8 > compiler_constants::TWELVE_BITS { - self.number_to_reg(word_offs * 8, &dest_offs_reg, owning_span.clone()); - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADD( - dest_offs_reg.clone(), - base_reg, - dest_offs_reg.clone(), - )), - comment: "get store offset".into(), - owning_span: owning_span.clone(), - }); - } else { - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - dest_offs_reg.clone(), - base_reg, - VirtualImmediate12 { - value: (word_offs * 8) as u16, - }, - )), - comment: "get store offset".into(), - owning_span: owning_span.clone(), - }); - } - - if store_size_in_words * 8 > compiler_constants::TWELVE_BITS { - let size_reg = self.reg_seqr.next(); - self.number_to_reg( - store_size_in_words * 8, - &size_reg, - owning_span.clone(), - ); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::MCP( - dest_offs_reg, - stored_reg, - size_reg, - )), - comment: "store value".into(), - owning_span, - }); - } else { - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::MCPI( - dest_offs_reg, - stored_reg, - VirtualImmediate12 { - value: (store_size_in_words * 8) as u16, - }, - )), - comment: "store value".into(), - owning_span, - }); - } - } - } - }, - }; - ok((), Vec::new(), Vec::new()) - } - - fn resolve_ptr(&mut self, ptr_val: &Value) -> CompileResult<(Pointer, Pointer, u64)> { - match &self.context.values[ptr_val.0].value { - ValueDatum::Instruction(Instruction::GetPointer { - base_ptr, - ptr_ty, - offset, - }) => ok((*base_ptr, *ptr_ty, *offset), Vec::new(), Vec::new()), - _otherwise => err( - Vec::new(), - vec![CompileError::Internal( - "Pointer arg for load/store is not a get_ptr instruction.", - self.md_mgr - .val_to_span(self.context, *ptr_val) - .unwrap_or_else(Self::empty_span), - )], - ), - } - } - - fn initialise_non_aggregate_type( - &mut self, - constant: &Constant, - span: Option, - ) -> VirtualRegister { - let value_size = ir_type_size_in_bytes(self.context, &constant.ty); - if size_bytes_in_words!(value_size) == 1 { - match constant.value { - ConstantValue::Unit | ConstantValue::Bool(false) | ConstantValue::Uint(0) => { - return VirtualRegister::Constant(ConstantRegister::Zero) - } - - ConstantValue::Bool(true) | ConstantValue::Uint(1) => { - return VirtualRegister::Constant(ConstantRegister::One) - } - _ => (), - } - } - - // Get the constant into the namespace. - let lit = ir_constant_to_ast_literal(constant); - let data_id = self.data_section.insert_data_value(&lit); - - // Allocate a register for it, and a load instruction. - let reg = self.reg_seqr.next(); - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::LWDataId(reg.clone(), data_id)), - comment: "literal instantiation".into(), - owning_span: span, - }); - - // Insert the value into the map. - //self.reg_map.insert(*value, reg.clone()); - // - // Actually, no, don't. It's possible for constant values to be - // reused in the IR, especially with transforms which copy blocks - // around, like inlining. The `LW`/`LWDataId` instruction above - // initialises that constant value but it may be in a conditional - // block and not actually get evaluated for every possible - // execution. So using the register later on by pulling it from - // `self.reg_map` will have a potentially uninitialised register. - // - // By not putting it in the map we recreate the `LW` each time it's - // used, which also isn't ideal. 
A better solution is to put this - initialisation into the IR itself, and allow for analysis there - to determine when it may be initialised and/or reused. - - // Return register. - reg - } - - fn initialise_aggregate_type( - &mut self, - constant: &Constant, - value_type: &Type, - span: Option<Span>, - ) -> VirtualRegister { - // A constant struct or array. We still allocate space for it on - the stack, but create the field or element initialisers - recursively. - - // Get the total size using the value type. We shouldn't use constant.ty here because - the actual type might contain unions which constant.ty doesn't account for. - let total_size = size_bytes_round_up_to_word_alignment!(ir_type_size_in_bytes( - self.context, - value_type, - )); - if total_size > compiler_constants::TWENTY_FOUR_BITS { - todo!("Enormous stack usage for locals."); - } - - let start_reg = self.reg_seqr.next(); - - // We can have zero sized structs and maybe arrays? - if total_size > 0 { - // Save the stack pointer. - self.bytecode.push(Op::unowned_register_move_comment( - start_reg.clone(), - VirtualRegister::Constant(ConstantRegister::StackPointer), - "save register for temporary stack value", - )); - - let mut alloc_op = Op::unowned_stack_allocate_memory(VirtualImmediate24 { - value: total_size as u32, - }); - alloc_op.comment = format!( - "allocate {} bytes for temporary {}", - total_size, - if matches!(&constant.value, ConstantValue::Struct(_)) { - "struct" - } else { - "array" - }, - ); - self.bytecode.push(alloc_op); - - // Fill in the fields. - self.initialise_constant_memory(constant, value_type, &start_reg, 0, span); - } - - // Return the start ptr. - start_reg - } - - // Get the reg corresponding to `value`. Returns None if the value is not in reg_map or is not - a constant. - fn value_to_register_or_none(&mut self, value: &Value) -> Option<VirtualRegister> { - let value_type = value.get_type(self.context).unwrap(); - match self.reg_map.get(value) { - Some(reg) => Some(reg.clone()), - None => { - match &self.context.values[value.0].value { - // Handle constants. - ValueDatum::Constant(constant) => { - let span = self.md_mgr.val_to_span(self.context, *value); - match &value_type { - Type::Unit - | Type::Bool - | Type::Uint(_) - | Type::B256 - | Type::String(_) - | Type::Pointer(_) => { - Some(self.initialise_non_aggregate_type(constant, span)) - } - Type::Array(_) | Type::Struct(_) | Type::Union(_) => { - Some(self.initialise_aggregate_type(constant, &value_type, span)) - } - } - } - _otherwise => None, - } - } - } - } - - // Same as `value_to_register_or_none` but returns a new register if no register is found or if - `value` is not a constant. - fn value_to_register(&mut self, value: &Value) -> VirtualRegister { - match self.value_to_register_or_none(value) { - Some(reg) => reg, - None => { - // Just make a new register for this value. - let reg = self.reg_seqr.next(); - self.reg_map.insert(*value, reg.clone()); - reg - } - } - } - - fn number_to_reg(&mut self, offset: u64, offset_reg: &VirtualRegister, span: Option<Span>) { - if offset > compiler_constants::TWENTY_FOUR_BITS { - todo!("Absolutely giant arrays."); - } - - // Use bitwise ORs and SHIFTs to create a 24 bit value in a register.
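// The ORI / SLLI / ORI sequence below, evaluated as integer arithmetic: a
// 24-bit constant is assembled from two 12-bit halves, since no single
// immediate field is wide enough to hold it.
fn build_24_bit_value(offset: u64) -> u64 {
    assert!(offset <= 0xff_ffff); // the caller guarantees it fits in 24 bits
    let hi = offset >> 12;        // ORI  reg, $zero, hi
    let shifted = hi << 12;       // SLLI reg, reg, 12
    shifted | (offset & 0xfff)    // ORI  reg, reg, lo
}
// build_24_bit_value(x) == x for every x up to 24 bits.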
- self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ORI( - offset_reg.clone(), - VirtualRegister::Constant(ConstantRegister::Zero), - VirtualImmediate12 { - value: (offset >> 12) as u16, - }, - )), - comment: "get extract offset high bits".into(), - owning_span: span.clone(), - }); - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::SLLI( - offset_reg.clone(), - offset_reg.clone(), - VirtualImmediate12 { value: 12 }, - )), - comment: "shift extract offset high bits".into(), - owning_span: span.clone(), - }); - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ORI( - offset_reg.clone(), - offset_reg.clone(), - VirtualImmediate12 { - value: (offset & 0xfff) as u16, - }, - )), - comment: "get extract offset low bits".into(), - owning_span: span, - }); - } - - // Insert asm instructions to initialise a stack variable of type `value_type` with a Constant - // `constant`. Here, `value_type` accounts for the fact that the stack variable might include - // unions. - // - // If the initialiser is smaller than `value_type` (e.g. initialising a union with one of - // its small variants), add zero padding. - fn initialise_constant_memory( - &mut self, - constant: &Constant, - value_type: &Type, - start_reg: &VirtualRegister, - offs_in_words: u64, - span: Option, - ) -> u64 { - let value_size = ir_type_size_in_bytes(self.context, value_type); - let value_size_in_words = size_bytes_in_words!(value_size); - - if matches!(constant.value, ConstantValue::Undef) { - // We don't need to actually create an initialiser, but we do need to return the - // field size in words. - return size_bytes_in_words!(value_size); - } - - match &value_type { - Type::Unit | Type::Bool | Type::Uint(_) | Type::Pointer(_) => { - // Get the constant into the namespace. - let lit = ir_constant_to_ast_literal(constant); - let data_id = self.data_section.insert_data_value(&lit); - - // Load the initialiser value. - let init_reg = self.reg_seqr.next(); - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::LWDataId(init_reg.clone(), data_id)), - comment: "literal instantiation for aggregate field".into(), - owning_span: span.clone(), - }); - - if offs_in_words > compiler_constants::TWELVE_BITS { - let offs_reg = self.reg_seqr.next(); - self.number_to_reg(offs_in_words, &offs_reg, span.clone()); - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADD( - start_reg.clone(), - start_reg.clone(), - offs_reg.clone(), - )), - comment: "calculate byte offset to aggregate field".into(), - owning_span: span.clone(), - }); - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::SW( - start_reg.clone(), - init_reg, - VirtualImmediate12 { value: 0 }, - )), - comment: "initialise aggregate field".into(), - owning_span: span, - }); - } else { - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::SW( - start_reg.clone(), - init_reg, - VirtualImmediate12 { - value: offs_in_words as u16, - }, - )), - comment: "initialise aggregate field".into(), - owning_span: span, - }); - } - - 1 - } - Type::B256 | Type::String(_) => { - // Get the constant into the namespace. - let lit = ir_constant_to_ast_literal(constant); - let data_id = self.data_section.insert_data_value(&lit); - - // Load the initialiser value. 
- let init_reg = self.reg_seqr.next(); - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::LWDataId(init_reg.clone(), data_id)), - comment: "literal instantiation for aggregate field".into(), - owning_span: span.clone(), - }); - - // Write the initialiser to memory. Most Literals are 1 word, B256 is 32 bytes and - needs to use a MCP instruction. - let offs_reg = self.reg_seqr.next(); - if offs_in_words * 8 > compiler_constants::TWELVE_BITS { - self.number_to_reg(offs_in_words * 8, &offs_reg, span.clone()); - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADD( - offs_reg.clone(), - start_reg.clone(), - offs_reg.clone(), - )), - comment: "calculate byte offset to aggregate field".into(), - owning_span: span.clone(), - }); - } else { - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - offs_reg.clone(), - start_reg.clone(), - VirtualImmediate12 { - value: (offs_in_words * 8) as u16, - }, - )), - comment: "calculate byte offset to aggregate field".into(), - owning_span: span.clone(), - }); - } - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::MCPI( - offs_reg, - init_reg, - VirtualImmediate12 { - value: value_size as u16, - }, - )), - comment: "initialise aggregate field".into(), - owning_span: span, - }); - - value_size_in_words - } - Type::Array(aggregate) => { - match (&constant.value, &self.context.aggregates[aggregate.0]) { - (ConstantValue::Array(items), AggregateContent::ArrayType(element_type, _)) => { - // Recurse for each item, accumulating the field offset and the final size. - items.iter().fold(0, |local_offs, item| { - local_offs - + self.initialise_constant_memory( - item, - element_type, - start_reg, - offs_in_words + local_offs, - span.clone(), - ) - }) - } - _ => unreachable!("Inconsistent types for constant initialisation"), - } - } - Type::Struct(aggregate) => { - match (&constant.value, &self.context.aggregates[aggregate.0]) { - (ConstantValue::Struct(items), AggregateContent::FieldTypes(field_tys)) => { - // Recurse for each item, accumulating the field offset and the final size. - items.iter().zip(field_tys.iter()).fold( - 0, - |local_offs, (item, field_tys)| { - local_offs - + self.initialise_constant_memory( - item, - field_tys, - start_reg, - offs_in_words + local_offs, - span.clone(), - ) - }, - ) - } - _ => unreachable!("Inconsistent types for constant initialisation"), - } - } - Type::Union(_) => { - // If the literal we're trying to initialise with is smaller than the actual - size of the union, then a padding of zeros is required. Calculate the size of - the padding and set the appropriate bytes to zero.
- let constant_size = ir_type_size_in_bytes(self.context, &constant.ty); - assert!(constant_size <= value_size); - let padding_size = value_size - constant_size; - let padding_size_in_words = size_bytes_in_words!(padding_size); - - if padding_size > 0 { - // Store padding of zeros and then store the value itself - let union_base_reg = self.reg_seqr.next(); - if offs_in_words * 8 > compiler_constants::TWELVE_BITS { - let offs_reg = self.reg_seqr.next(); - self.number_to_reg(offs_in_words * 8, &offs_reg, span.clone()); - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADD( - union_base_reg.clone(), - start_reg.clone(), - offs_reg.clone(), - )), - comment: "get base pointer for union".into(), - owning_span: span.clone(), - }); - } else { - self.bytecode.push(Op { - opcode: either::Either::Left(VirtualOp::ADDI( - union_base_reg.clone(), - start_reg.clone(), - VirtualImmediate12 { - value: (offs_in_words * 8) as u16, - }, - )), - comment: "get base pointer for union".into(), - owning_span: span.clone(), - }); - } - self.bytecode.push(Op { - opcode: Either::Left(VirtualOp::MCLI( - union_base_reg, - VirtualImmediate18 { - value: padding_size as u32, - }, - )), - comment: "clear padding for union initialisation".into(), - owning_span: span.clone(), - }); - } - - // Now do the actual initialisation - self.initialise_constant_memory( - constant, - &constant.ty, - start_reg, - offs_in_words + padding_size_in_words, - span, - ); - - value_size_in_words - } - } - } - - fn block_to_label(&mut self, block: &Block) -> Label { - match self.label_map.get(block) { - Some(label) => label.clone(), - None => { - let label = self.reg_seqr.get_label(); - self.label_map.insert(*block, label.clone()); - label - } - } - } -} - -fn ir_constant_to_ast_literal(constant: &Constant) -> Literal { - match &constant.value { - ConstantValue::Undef => unreachable!("Cannot convert 'undef' to a literal."), - ConstantValue::Unit => Literal::U64(0), // No unit. - ConstantValue::Bool(b) => Literal::Boolean(*b), - ConstantValue::Uint(n) => Literal::U64(*n), - ConstantValue::B256(bs) => Literal::B256(*bs), - ConstantValue::String(bs) => { - // ConstantValue::String bytes are guaranteed to be valid UTF8. - let s = std::str::from_utf8(bs).unwrap(); - Literal::String(Span::new(std::sync::Arc::from(s), 0, s.len(), None).unwrap()) - } - ConstantValue::Array(_) | ConstantValue::Struct(_) => { - unreachable!("Cannot convert aggregates to a literal.") - } - } -} - -// ------------------------------------------------------------------------------------------------- - -pub fn ir_type_size_in_bytes(context: &Context, ty: &Type) -> u64 { +pub(crate) fn ir_type_size_in_bytes(context: &Context, ty: &Type) -> u64 { match ty { Type::Unit | Type::Bool | Type::Uint(_) | Type::Pointer(_) => 8, Type::B256 => 32, Type::String(n) => size_bytes_round_up_to_word_alignment!(n), Type::Array(aggregate) => { - if let AggregateContent::ArrayType(el_ty, cnt) = &context.aggregates[aggregate.0] { + if let AggregateContent::ArrayType(el_ty, cnt) = aggregate.get_content(context) { cnt * ir_type_size_in_bytes(context, el_ty) } else { unreachable!("Wrong content for array.") } } Type::Struct(aggregate) => { - if let AggregateContent::FieldTypes(field_tys) = &context.aggregates[aggregate.0] { + if let AggregateContent::FieldTypes(field_tys) = aggregate.get_content(context) { // Sum up all the field sizes. 
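[Editorial note: the struct arm continues below by summing its field sizes. Taken together, the sizing rules that `ir_type_size_in_bytes` implements can be sketched as follows; `Ty` is a simplified stand-in for the IR `Type`, which stores aggregates behind handles into a `Context`.]

```rust
// Hedged restatement of the sizing rules; names here are illustrative.
enum Ty {
    Unit,                // one 8-byte word, like Bool, Uint and Pointer
    B256,                // 32 bytes
    Str(u64),            // byte length, rounded up to a word boundary
    Array(Box<Ty>, u64), // element type and count
    Struct(Vec<Ty>),
    Union(Vec<Ty>),
}

fn size_in_bytes(ty: &Ty) -> u64 {
    match ty {
        Ty::Unit => 8,
        Ty::B256 => 32,
        Ty::Str(n) => (n + 7) / 8 * 8, // size_bytes_round_up_to_word_alignment!
        Ty::Array(el, count) => size_in_bytes(el) * count,
        Ty::Struct(fields) => fields.iter().map(size_in_bytes).sum(),
        Ty::Union(variants) => variants.iter().map(size_in_bytes).max().unwrap_or(0),
    }
}
```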
field_tys .iter() @@ -2525,7 +175,7 @@ pub fn ir_type_size_in_bytes(context: &Context, ty: &Type) -> u64 { } } Type::Union(aggregate) => { - if let AggregateContent::FieldTypes(field_tys) = &context.aggregates[aggregate.0] { + if let AggregateContent::FieldTypes(field_tys) = aggregate.get_content(context) { // Find the max size for field sizes. field_tys .iter() @@ -2540,7 +190,7 @@ pub fn ir_type_size_in_bytes(context: &Context, ty: &Type) -> u64 { } // Aggregate (nested) field offset in words and size in bytes. -pub fn aggregate_idcs_to_field_layout( +pub(crate) fn aggregate_idcs_to_field_layout( context: &Context, ty: &Type, idcs: &[u64], @@ -2549,7 +199,7 @@ pub fn aggregate_idcs_to_field_layout( .fold(((0, 0), *ty), |((offs, _), ty), idx| match ty { Type::Struct(aggregate) => { let idx = *idx as usize; - let field_types = &context.aggregates[aggregate.0].field_types(); + let field_types = &aggregate.get_content(context).field_types(); let field_type = field_types[idx]; let field_offs_in_bytes = field_types .iter() @@ -2569,7 +219,7 @@ pub fn aggregate_idcs_to_field_layout( Type::Union(aggregate) => { let idx = *idx as usize; - let field_type = context.aggregates[aggregate.0].field_types()[idx]; + let field_type = aggregate.get_content(context).field_types()[idx]; let union_size_in_bytes = ir_type_size_in_bytes(context, &ty); let field_size_in_bytes = ir_type_size_in_bytes(context, &field_type); @@ -2608,7 +258,7 @@ mod tests { // // Run the tests! // - tracing::info!("---- IR To ASM: {:?} ----", path); + println!("---- IR To ASM: {:?} ----", path); test_ir_to_asm(path); } Some("asm") | Some("disabled") => (), diff --git a/sway-core/src/asm_generation/jump_optimized_asm_set.rs b/sway-core/src/asm_generation/jump_optimized_asm_set.rs deleted file mode 100644 index ba8a344cb0b..00000000000 --- a/sway-core/src/asm_generation/jump_optimized_asm_set.rs +++ /dev/null @@ -1,87 +0,0 @@ -use crate::asm_generation::{ - AbstractInstructionSet, DataSection, RegisterAllocatedAsmSet, RegisterSequencer, -}; -use std::fmt; - -/// Represents an ASM set which has had jump labels and jumps optimized -pub enum JumpOptimizedAsmSet { - ContractAbi { - data_section: DataSection, - program_section: AbstractInstructionSet, - }, - ScriptMain { - data_section: DataSection, - program_section: AbstractInstructionSet, - }, - PredicateMain { - data_section: DataSection, - program_section: AbstractInstructionSet, - }, - // Libraries do not generate any asm. 
- Library, -} - -impl JumpOptimizedAsmSet { - pub(crate) fn allocate_registers( - self, - register_sequencer: &mut RegisterSequencer, - ) -> RegisterAllocatedAsmSet { - match self { - JumpOptimizedAsmSet::Library => RegisterAllocatedAsmSet::Library, - JumpOptimizedAsmSet::ScriptMain { - data_section, - program_section, - } => { - let program_section = program_section - .realize_labels(&data_section) - .allocate_registers(register_sequencer); - RegisterAllocatedAsmSet::ScriptMain { - data_section, - program_section, - } - } - JumpOptimizedAsmSet::PredicateMain { - data_section, - program_section, - } => { - let program_section = program_section - .realize_labels(&data_section) - .allocate_registers(register_sequencer); - RegisterAllocatedAsmSet::PredicateMain { - data_section, - program_section, - } - } - JumpOptimizedAsmSet::ContractAbi { - program_section, - data_section, - } => RegisterAllocatedAsmSet::ContractAbi { - program_section: program_section - .realize_labels(&data_section) - .allocate_registers(register_sequencer), - data_section, - }, - } - } -} - -impl fmt::Display for JumpOptimizedAsmSet { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - JumpOptimizedAsmSet::ScriptMain { - data_section, - program_section, - } => write!(f, "{}\n{}", program_section, data_section), - JumpOptimizedAsmSet::PredicateMain { - data_section, - program_section, - } => write!(f, "{}\n{}", program_section, data_section), - JumpOptimizedAsmSet::ContractAbi { - data_section, - program_section, - } => write!(f, "{}\n{}", program_section, data_section), - // Libraries do not directly generate any asm. - JumpOptimizedAsmSet::Library => write!(f, ""), - } - } -} diff --git a/sway-core/src/asm_generation/mod.rs b/sway-core/src/asm_generation/mod.rs index eb648c61c76..11318060685 100644 --- a/sway-core/src/asm_generation/mod.rs +++ b/sway-core/src/asm_generation/mod.rs @@ -1,33 +1,26 @@ -use crate::{ - asm_lang::{ - allocated_ops::AllocatedRegister, virtual_register::*, Label, Op, OrganizationalOp, - VirtualImmediate12, VirtualOp, - }, - parse_tree::Literal, -}; -use std::{collections::BTreeSet, fmt}; +use crate::asm_lang::{allocated_ops::AllocatedRegister, virtual_register::*}; -use either::Either; +use std::collections::BTreeSet; mod abstract_instruction_set; +mod allocated_abstract_instruction_set; +mod asm_builder; pub(crate) mod checks; pub(crate) mod compiler_constants; mod data_section; mod finalized_asm; pub mod from_ir; mod instruction_set; -mod jump_optimized_asm_set; -mod register_allocated_asm_set; +mod programs; pub(crate) mod register_allocator; mod register_sequencer; pub use finalized_asm::FinalizedAsm; use abstract_instruction_set::*; +use allocated_abstract_instruction_set::*; pub(crate) use data_section::*; use instruction_set::*; -use jump_optimized_asm_set::*; -use register_allocated_asm_set::*; use register_sequencer::*; // Initially, the bytecode will have a lot of individual registers being used. Each register will @@ -63,24 +56,6 @@ use register_sequencer::*; /// The [SwayAsmSet] contains either a contract ABI and corresponding ASM, a script's main /// function's ASM, or a predicate's main function's ASM. ASM is never generated for libraries, /// as that happens when the library itself is imported. 
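[Editorial note: the deletions here and below replace the old SwayAsmSet → JumpOptimizedAsmSet → RegisterAllocatedAsmSet chain with the AbstractProgram → AllocatedProgram → FinalProgram types introduced later in this diff. A minimal sketch of that type-state progression, with bodies elided; the real types carry a DataSection and instruction sets.]

```rust
// Sketch only: each phase consumes the previous one, so the compiler cannot,
// for example, realize labels before registers have been allocated.
struct AbstractProgram;  // virtual registers, abstract control flow
struct AllocatedProgram; // real registers, still-abstract control flow
struct FinalProgram;     // serializable VM bytecode

impl AbstractProgram {
    fn into_allocated_program(self) -> AllocatedProgram {
        AllocatedProgram // register allocation happens here
    }
}

impl AllocatedProgram {
    fn into_final_program(self) -> FinalProgram {
        FinalProgram // label realization and padding happen here
    }
}

fn compile(program: AbstractProgram) -> FinalProgram {
    program.into_allocated_program().into_final_program()
}
```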
-pub enum SwayAsmSet { - ContractAbi { - data_section: DataSection, - program_section: AbstractInstructionSet, - }, - ScriptMain { - data_section: DataSection, - program_section: AbstractInstructionSet, - }, - #[allow(dead_code)] - PredicateMain { - data_section: DataSection, - program_section: AbstractInstructionSet, - }, - // Libraries do not generate any asm. - #[allow(dead_code)] - Library, -} #[derive(Debug)] struct RegisterAllocationStatus { @@ -96,8 +71,8 @@ pub(crate) struct RegisterPool { impl RegisterPool { fn init() -> Self { let reg_pool: Vec = (0 - // - 1 because we reserve the final register for the data_section begin - ..compiler_constants::NUM_ALLOCATABLE_REGISTERS) + // - 1 because we reserve the final register for the data_section begin + ..compiler_constants::NUM_ALLOCATABLE_REGISTERS) .map(|x| RegisterAllocationStatus { reg: AllocatedRegister::Allocated(x), used_by: BTreeSet::new(), @@ -122,181 +97,3 @@ impl RegisterPool { allocated_reg.map(|RegisterAllocationStatus { reg, used_by: _ }| reg.clone()) } } - -impl fmt::Display for SwayAsmSet { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - SwayAsmSet::ScriptMain { - data_section, - program_section, - } => write!(f, "{}\n{}", program_section, data_section), - SwayAsmSet::PredicateMain { - data_section, - program_section, - } => write!(f, "{}\n{}", program_section, data_section), - SwayAsmSet::ContractAbi { - data_section, - program_section, - } => write!(f, "{}\n{}", program_section, data_section), - // Libraries do not directly generate any asm. - SwayAsmSet::Library => write!(f, ""), - } - } -} - -impl SwayAsmSet { - pub(crate) fn remove_unnecessary_jumps(self) -> JumpOptimizedAsmSet { - match self { - SwayAsmSet::ScriptMain { - data_section, - program_section, - } => JumpOptimizedAsmSet::ScriptMain { - data_section, - program_section: program_section.remove_sequential_jumps(), - }, - SwayAsmSet::PredicateMain { - data_section, - program_section, - } => JumpOptimizedAsmSet::PredicateMain { - data_section, - program_section: program_section.remove_sequential_jumps(), - }, - SwayAsmSet::Library {} => JumpOptimizedAsmSet::Library, - SwayAsmSet::ContractAbi { - data_section, - program_section, - } => JumpOptimizedAsmSet::ContractAbi { - data_section, - program_section: program_section.remove_sequential_jumps(), - }, - } - } -} - -/// Builds the asm preamble, which includes metadata and a jump past the metadata. 
-/// Right now, it looks like this: -/// -/// WORD OP -/// 1 JI program_start -/// - NOOP -/// 2 DATA_START (0-32) (in bytes, offset from $is) -/// - DATA_START (32-64) -/// 3 LW $ds $is 1 (where 1 is in words and $is is a byte address to base off of) -/// - ADD $ds $ds $is -/// 4 .program_start: -fn build_preamble(register_sequencer: &mut RegisterSequencer) -> [Op; 6] { - let label = register_sequencer.get_label(); - [ - // word 1 - Op::jump_to_label(label.clone()), - // word 1.5 - Op { - opcode: Either::Left(VirtualOp::NOOP), - comment: "".into(), - owning_span: None, - }, - // word 2 -- full word u64 placeholder - Op { - opcode: Either::Right(OrganizationalOp::DataSectionOffsetPlaceholder), - comment: "data section offset".into(), - owning_span: None, - }, - Op::unowned_jump_label_comment(label, "end of metadata"), - // word 3 -- load the data offset into $ds - Op { - opcode: Either::Left(VirtualOp::DataSectionRegisterLoadPlaceholder), - comment: "".into(), - owning_span: None, - }, - // word 3.5 -- add $ds $ds $is - Op { - opcode: Either::Left(VirtualOp::ADD( - VirtualRegister::Constant(ConstantRegister::DataSectionStart), - VirtualRegister::Constant(ConstantRegister::DataSectionStart), - VirtualRegister::Constant(ConstantRegister::InstructionStart), - )), - comment: "".into(), - owning_span: None, - }, - ] -} - -/// Builds the contract switch statement, or function selector, which takes the selector -/// stored in the call frame (see https://github.com/FuelLabs/sway/issues/97#issuecomment-870150684 -/// for an explanation of its location) -fn build_contract_abi_switch( - register_sequencer: &mut RegisterSequencer, - data_section: &mut DataSection, - selectors_and_labels: Vec<([u8; 4], Label)>, -) -> Vec { - let input_selector_register = register_sequencer.next(); - let mut asm_buf = vec![Op { - opcode: Either::Right(OrganizationalOp::Comment), - comment: "Begin contract ABI selector switch".into(), - owning_span: None, - }]; - // load the selector from the call frame - asm_buf.push(Op { - opcode: Either::Left(VirtualOp::LW( - input_selector_register.clone(), - VirtualRegister::Constant(ConstantRegister::FramePointer), - // see https://github.com/FuelLabs/fuel-specs/pull/193#issuecomment-876496372 - // We expect the last four bytes of this word to contain the selector, and the first - // four bytes to all be 0. - VirtualImmediate12::new_unchecked(73, "constant infallible value"), - )), - comment: "load input function selector".into(), - owning_span: None, - }); - - for (selector, label) in selectors_and_labels { - // put the selector in the data section - let data_label = - data_section.insert_data_value(&Literal::U32(u32::from_be_bytes(selector))); - // load the data into a register for comparison - let prog_selector_register = register_sequencer.next(); - asm_buf.push(Op { - opcode: Either::Left(VirtualOp::LWDataId( - prog_selector_register.clone(), - data_label, - )), - comment: "load fn selector for comparison".into(), - owning_span: None, - }); - // compare with the input selector - let comparison_result_register = register_sequencer.next(); - asm_buf.push(Op { - opcode: Either::Left(VirtualOp::EQ( - comparison_result_register.clone(), - input_selector_register.clone(), - prog_selector_register, - )), - comment: "function selector comparison".into(), - owning_span: None, - }); - - // jump to the function label if the selector was equal - asm_buf.push(Op { - // if the comparison result is _not_ equal to 0, then it was indeed equal. 
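[Editorial note: the JNZI push that acts on this comparison follows below. As a whole, the switch that both the old and the new builder emit behaves like this hypothetical model; function pointers stand in for jump labels, and `dispatch` is not a real compiler function.]

```rust
// Hypothetical model of the emitted selector switch, not compiler code.
fn dispatch(input_selector: u32, entries: &[(u32, fn())]) -> Result<(), &'static str> {
    // LW loads `input_selector` from word 73 of the call frame.
    for (selector, func) in entries {
        // EQ + JNZI: compare, then jump to the function's label on a match.
        if *selector == input_selector {
            func();
            return Ok(());
        }
    }
    // Fall-through: no selector matched, so the contract reverts (RVRT).
    Err("revert: no selector matched")
}
```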
-            opcode: Either::Right(OrganizationalOp::JumpIfNotZero(
-                comparison_result_register,
-                label,
-            )),
-            comment: "jump to selected function".into(),
-            owning_span: None,
-        });
-    }
-
-    // if none of the selectors matched, then revert
-    asm_buf.push(Op {
-        // see https://github.com/FuelLabs/sway/issues/97#issuecomment-875674105
-        // and https://github.com/FuelLabs/sway/issues/444#issuecomment-1012507337
-        opcode: Either::Left(VirtualOp::RVRT(VirtualRegister::Constant(
-            ConstantRegister::Zero,
-        ))),
-        comment: "revert if no selectors matched".into(),
-        owning_span: None,
-    });
-
-    asm_buf
-}
diff --git a/sway-core/src/asm_generation/programs.rs b/sway-core/src/asm_generation/programs.rs
new file mode 100644
index 00000000000..12c36eee478
--- /dev/null
+++ b/sway-core/src/asm_generation/programs.rs
@@ -0,0 +1,45 @@
+mod r#abstract;
+mod allocated;
+mod r#final;
+
+use super::{
+    register_sequencer::RegisterSequencer, AbstractInstructionSet, AllocatedAbstractInstructionSet,
+    DataSection, InstructionSet,
+};
+
+use crate::asm_lang::Label;
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
+pub(super) enum ProgramKind {
+    Script,
+    Contract,
+}
+
+/// An AbstractProgram represents code generated by compilation from IR, with virtual registers
+/// and abstract control flow.
+///
+/// Use `AbstractProgram::into_allocated_program()` to perform register allocation.
+///
+pub(super) struct AbstractProgram {
+    kind: ProgramKind,
+    data_section: DataSection,
+    entries: Vec<(Option<[u8; 4]>, Label, AbstractInstructionSet)>,
+    non_entries: Vec<AbstractInstructionSet>,
+    reg_seqr: RegisterSequencer,
+}
+
+/// An AllocatedProgram represents code which has allocated registers but still has abstract
+/// control flow.
+pub(super) struct AllocatedProgram {
+    kind: ProgramKind,
+    data_section: DataSection,
+    prologue: AllocatedAbstractInstructionSet,
+    functions: Vec<AllocatedAbstractInstructionSet>,
+}
+
+/// A FinalProgram represents code which may be serialized to VM bytecode.
+pub(super) struct FinalProgram {
+    kind: ProgramKind,
+    data_section: DataSection,
+    ops: InstructionSet,
+}
diff --git a/sway-core/src/asm_generation/programs/abstract.rs b/sway-core/src/asm_generation/programs/abstract.rs
new file mode 100644
index 00000000000..ac7d4202e55
--- /dev/null
+++ b/sway-core/src/asm_generation/programs/abstract.rs
@@ -0,0 +1,223 @@
+use super::{AbstractProgram, AllocatedProgram, ProgramKind};
+
+use crate::{
+    asm_generation::{
+        compiler_constants, AbstractInstructionSet, AllocatedAbstractInstructionSet, DataSection,
+        Entry, RegisterSequencer,
+    },
+    asm_lang::{
+        allocated_ops::{AllocatedOpcode, AllocatedRegister},
+        AllocatedAbstractOp, ConstantRegister, ControlFlowOp, Label, VirtualImmediate12,
+        VirtualImmediate18,
+    },
+};
+
+use either::Either;
+
+impl AbstractProgram {
+    pub(crate) fn new(
+        kind: ProgramKind,
+        data_section: DataSection,
+        entries: Vec<(Option<[u8; 4]>, Label, AbstractInstructionSet)>,
+        non_entries: Vec<AbstractInstructionSet>,
+        reg_seqr: RegisterSequencer,
+    ) -> Self {
+        AbstractProgram {
+            kind,
+            data_section,
+            entries,
+            non_entries,
+            reg_seqr,
+        }
+    }
+
+    pub(crate) fn into_allocated_program(mut self) -> AllocatedProgram {
+        // Build our bytecode prologue, which has a preamble and, for contracts, the switch based
+        // on the function selector.
+        let mut prologue = self.build_preamble();
+
+        if self.kind == ProgramKind::Contract {
+            self.build_contract_abi_switch(&mut prologue);
+        }
+
+        // Allocate the registers for each function.
+ let functions = self + .entries + .into_iter() + .map(|(_, _, fn_ops)| fn_ops) + .chain(self.non_entries.into_iter()) + .map(AbstractInstructionSet::optimize) + .map(|fn_ops| fn_ops.allocate_registers(&mut self.reg_seqr)) + .map(AllocatedAbstractInstructionSet::emit_pusha_popa) + .collect(); + + // XXX need to verify that the stack use for each function is balanced. + + AllocatedProgram { + kind: self.kind, + data_section: self.data_section, + prologue, + functions, + } + } + + /// Builds the asm preamble, which includes metadata and a jump past the metadata. + /// Right now, it looks like this: + /// + /// WORD OP + /// 1 JI program_start + /// - NOOP + /// 2 DATA_START (0-32) (in bytes, offset from $is) + /// - DATA_START (32-64) + /// 3 LW $ds $is 1 (where 1 is in words and $is is a byte address to base off of) + /// - ADD $ds $ds $is + /// 4 .program_start: + fn build_preamble(&mut self) -> AllocatedAbstractInstructionSet { + let label = self.reg_seqr.get_label(); + AllocatedAbstractInstructionSet { + ops: [ + // word 1 + AllocatedAbstractOp { + opcode: Either::Right(ControlFlowOp::Jump(label)), + comment: String::new(), + owning_span: None, + }, + // word 1.5 + AllocatedAbstractOp { + opcode: Either::Left(AllocatedOpcode::NOOP), + comment: "".into(), + owning_span: None, + }, + // word 2 -- full word u64 placeholder + AllocatedAbstractOp { + opcode: Either::Right(ControlFlowOp::DataSectionOffsetPlaceholder), + comment: "data section offset".into(), + owning_span: None, + }, + AllocatedAbstractOp { + opcode: Either::Right(ControlFlowOp::Label(label)), + comment: "end of metadata".into(), + owning_span: None, + }, + // word 3 -- load the data offset into $ds + AllocatedAbstractOp { + opcode: Either::Left(AllocatedOpcode::DataSectionRegisterLoadPlaceholder), + comment: "".into(), + owning_span: None, + }, + // word 3.5 -- add $ds $ds $is + AllocatedAbstractOp { + opcode: Either::Left(AllocatedOpcode::ADD( + AllocatedRegister::Constant(ConstantRegister::DataSectionStart), + AllocatedRegister::Constant(ConstantRegister::DataSectionStart), + AllocatedRegister::Constant(ConstantRegister::InstructionStart), + )), + comment: "".into(), + owning_span: None, + }, + ] + .to_vec(), + } + } + + /// Builds the contract switch statement based on the first argument to a contract call: the + /// 'selector'. + /// See https://github.com/FuelLabs/fuel-specs/blob/master/specs/vm/main.md#call-frames which + /// describes the first argument to be at word offset 73. + fn build_contract_abi_switch(&mut self, asm_buf: &mut AllocatedAbstractInstructionSet) { + const SELECTOR_WORD_OFFSET: u64 = 73; + const INPUT_SELECTOR_REG: AllocatedRegister = AllocatedRegister::Allocated(0); + const PROG_SELECTOR_REG: AllocatedRegister = AllocatedRegister::Allocated(1); + const CMP_RESULT_REG: AllocatedRegister = AllocatedRegister::Allocated(2); + + // Build the switch statement for selectors. + asm_buf.ops.push(AllocatedAbstractOp { + opcode: Either::Right(ControlFlowOp::Comment), + comment: "Begin contract ABI selector switch".into(), + owning_span: None, + }); + + // Load the selector from the call frame. + asm_buf.ops.push(AllocatedAbstractOp { + opcode: Either::Left(AllocatedOpcode::LW( + INPUT_SELECTOR_REG, + AllocatedRegister::Constant(ConstantRegister::FramePointer), + VirtualImmediate12::new_unchecked( + SELECTOR_WORD_OFFSET, + "constant infallible value", + ), + )), + comment: "load input function selector".into(), + owning_span: None, + }); + + // Add a 'case' entry for each selector. 
+ for (opt_selector, label, _) in &self.entries { + // Put the selector in the data section. + let data_label = self.data_section.insert_data_value(Entry::new_word( + u32::from_be_bytes( + opt_selector.expect("Entries for contracts must have a selector."), + ) as u64, + None, + )); + + // Load the data into a register for comparison. + asm_buf.ops.push(AllocatedAbstractOp { + opcode: Either::Left(AllocatedOpcode::LWDataId(PROG_SELECTOR_REG, data_label)), + comment: "load fn selector for comparison".into(), + owning_span: None, + }); + + // Compare with the input selector. + asm_buf.ops.push(AllocatedAbstractOp { + opcode: Either::Left(AllocatedOpcode::EQ( + CMP_RESULT_REG, + INPUT_SELECTOR_REG, + PROG_SELECTOR_REG, + )), + comment: "function selector comparison".into(), + owning_span: None, + }); + + // Jump to the function label if the selector was equal. + asm_buf.ops.push(AllocatedAbstractOp { + // If the comparison result is _not_ equal to 0, then it was indeed equal. + opcode: Either::Right(ControlFlowOp::JumpIfNotZero(CMP_RESULT_REG, *label)), + comment: "jump to selected function".into(), + owning_span: None, + }); + } + + // If none of the selectors matched, then revert. This may change in the future, see + // https://github.com/FuelLabs/sway/issues/444 + asm_buf.ops.push(AllocatedAbstractOp { + opcode: Either::Left(AllocatedOpcode::MOVI( + AllocatedRegister::Constant(ConstantRegister::Scratch), + VirtualImmediate18 { + value: compiler_constants::MISMATCHED_SELECTOR_REVERT_CODE, + }, + )), + comment: "special code for mismatched selector".into(), + owning_span: None, + }); + asm_buf.ops.push(AllocatedAbstractOp { + opcode: Either::Left(AllocatedOpcode::RVRT(AllocatedRegister::Constant( + ConstantRegister::Scratch, + ))), + comment: "revert if no selectors matched".into(), + owning_span: None, + }); + } +} + +impl std::fmt::Display for AbstractProgram { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + for (_, _, func) in &self.entries { + writeln!(f, "{func}")?; + } + for func in &self.non_entries { + writeln!(f, "{func}")?; + } + write!(f, "{}", self.data_section) + } +} diff --git a/sway-core/src/asm_generation/programs/allocated.rs b/sway-core/src/asm_generation/programs/allocated.rs new file mode 100644 index 00000000000..577d7524d52 --- /dev/null +++ b/sway-core/src/asm_generation/programs/allocated.rs @@ -0,0 +1,39 @@ +use super::{AllocatedProgram, FinalProgram}; + +use crate::asm_generation::{AllocatedAbstractInstructionSet, InstructionSet}; + +impl AllocatedProgram { + pub(crate) fn into_final_program(self) -> FinalProgram { + // Concat the prologue and all the functions together. 
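[Editorial note: the concatenation code follows; after it, `realize_labels` turns abstract jumps into concrete instruction offsets. A hedged two-pass sketch under simplified types; `AOp` and `ROp` are stand-ins for the real abstract and realized ops, and the real pass also counts ops that expand to more than one word.]

```rust
use std::collections::HashMap;

// Simplified stand-ins: labels are still symbolic in `AOp`, while `ROp`
// carries concrete word offsets.
enum AOp { Label(u32), Jump(u32), Other(&'static str) }
enum ROp { JumpTo(u64), Other(&'static str) }

fn realize_labels(ops: Vec<AOp>) -> Vec<ROp> {
    // Pass 1: record the offset each label lands on; labels emit no code.
    let mut offsets = HashMap::new();
    let mut pc = 0u64;
    for op in &ops {
        match op {
            AOp::Label(l) => { offsets.insert(*l, pc); }
            _ => pc += 1,
        }
    }
    // Pass 2: drop the labels and rewrite jumps to concrete offsets.
    ops.into_iter()
        .filter_map(|op| match op {
            AOp::Label(_) => None,
            AOp::Jump(l) => Some(ROp::JumpTo(offsets[&l])),
            AOp::Other(s) => Some(ROp::Other(s)),
        })
        .collect()
}
```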
+ let abstract_ops = AllocatedAbstractInstructionSet { + ops: std::iter::once(self.prologue.ops) + .chain(self.functions.into_iter().map(|f| f.ops)) + .flatten() + .collect(), + }; + + let realized_ops = abstract_ops.realize_labels(&self.data_section); + let ops = InstructionSet { + ops: realized_ops.pad_to_even(), + }; + + FinalProgram { + kind: self.kind, + data_section: self.data_section, + ops, + } + } +} + +impl std::fmt::Display for AllocatedProgram { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + writeln!(f, ";; {:?}", self.kind)?; + writeln!(f, ";; --- Prologue ---\n{}\n", self.prologue)?; + writeln!(f, ";; --- Functions ---")?; + for function in &self.functions { + writeln!(f, "{function}\n")?; + } + writeln!(f, ";; --- Data ---")?; + writeln!(f, "{}", self.data_section) + } +} diff --git a/sway-core/src/asm_generation/programs/final.rs b/sway-core/src/asm_generation/programs/final.rs new file mode 100644 index 00000000000..701a6be5390 --- /dev/null +++ b/sway-core/src/asm_generation/programs/final.rs @@ -0,0 +1,24 @@ +use super::{FinalProgram, ProgramKind}; + +use crate::FinalizedAsm; + +impl FinalProgram { + pub(crate) fn finalize(self) -> FinalizedAsm { + match self.kind { + ProgramKind::Script => FinalizedAsm::ScriptMain { + data_section: self.data_section, + program_section: self.ops, + }, + ProgramKind::Contract => FinalizedAsm::ContractAbi { + data_section: self.data_section, + program_section: self.ops, + }, + } + } +} + +impl std::fmt::Display for FinalProgram { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}\n{}", self.ops, self.data_section) + } +} diff --git a/sway-core/src/asm_generation/register_allocated_asm_set.rs b/sway-core/src/asm_generation/register_allocated_asm_set.rs deleted file mode 100644 index 62e2bd739be..00000000000 --- a/sway-core/src/asm_generation/register_allocated_asm_set.rs +++ /dev/null @@ -1,113 +0,0 @@ -use crate::{ - asm_generation::{DataSection, FinalizedAsm, InstructionSet}, - asm_lang::allocated_ops::AllocatedOp, -}; -use std::fmt; - -/// Represents an ASM set which has had registers allocated -pub enum RegisterAllocatedAsmSet { - ContractAbi { - data_section: DataSection, - program_section: InstructionSet, - }, - ScriptMain { - data_section: DataSection, - program_section: InstructionSet, - }, - PredicateMain { - data_section: DataSection, - program_section: InstructionSet, - }, - // Libraries do not generate any asm. 
- Library, -} - -impl RegisterAllocatedAsmSet { - pub(crate) fn optimize(self) -> FinalizedAsm { - // TODO implement this -- noop for now - match self { - RegisterAllocatedAsmSet::Library => FinalizedAsm::Library, - RegisterAllocatedAsmSet::ScriptMain { - mut program_section, - data_section, - } => { - // ensure there's an even number of ops so the - // data section offset is valid - if program_section.ops.len() & 1 != 0 { - program_section.ops.push(AllocatedOp { - opcode: crate::asm_lang::allocated_ops::AllocatedOpcode::NOOP, - comment: "word-alignment of data section".into(), - owning_span: None, - }); - } - FinalizedAsm::ScriptMain { - program_section, - data_section, - } - } - RegisterAllocatedAsmSet::PredicateMain { - mut program_section, - data_section, - } => { - // ensure there's an even number of ops so the - // data section offset is valid - if program_section.ops.len() & 1 != 0 { - program_section.ops.push(AllocatedOp { - opcode: crate::asm_lang::allocated_ops::AllocatedOpcode::NOOP, - comment: "word-alignment of data section".into(), - owning_span: None, - }); - } - FinalizedAsm::PredicateMain { - program_section, - data_section, - } - } - RegisterAllocatedAsmSet::ContractAbi { - mut program_section, - data_section, - } => { - // ensure there's an even number of ops so the - // data section offset is valid - if program_section.ops.len() & 1 != 0 { - program_section.ops.push(AllocatedOp { - opcode: crate::asm_lang::allocated_ops::AllocatedOpcode::NOOP, - comment: "word-alignment of data section".into(), - owning_span: None, - }); - } - FinalizedAsm::ContractAbi { - program_section, - data_section, - } - } - } - } -} - -impl fmt::Display for RegisterAllocatedAsmSet { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - RegisterAllocatedAsmSet::ScriptMain { - program_section, - data_section, - } => { - write!(f, "{}\n{}", program_section, data_section) - } - RegisterAllocatedAsmSet::PredicateMain { - program_section, - data_section, - } => { - write!(f, "{}\n{}", program_section, data_section) - } - RegisterAllocatedAsmSet::ContractAbi { - program_section, - data_section, - } => { - write!(f, "{}\n{}", program_section, data_section) - } - // Libraries do not directly generate any asm. - RegisterAllocatedAsmSet::Library => write!(f, ""), - } - } -} diff --git a/sway-core/src/asm_generation/register_allocator.rs b/sway-core/src/asm_generation/register_allocator.rs index 7a97e6fce30..e8f21738797 100644 --- a/sway-core/src/asm_generation/register_allocator.rs +++ b/sway-core/src/asm_generation/register_allocator.rs @@ -1,10 +1,15 @@ -use crate::asm_generation::{ - register_sequencer::RegisterSequencer, RegisterAllocationStatus, RegisterPool, +use crate::{ + asm_generation::{ + register_sequencer::RegisterSequencer, RegisterAllocationStatus, RegisterPool, + }, + asm_lang::{virtual_register::*, Op, VirtualOp}, }; -use crate::asm_lang::{virtual_register::*, RealizedOp, VirtualOp}; -use petgraph::graph::NodeIndex; + use std::collections::{BTreeSet, HashMap}; +use either::Either; +use petgraph::graph::NodeIndex; + pub type InterferenceGraph = petgraph::stable_graph::StableGraph; @@ -36,7 +41,7 @@ pub type InterferenceGraph = /// for each instruction op (traversed in reverse topological order of the CFG) /// prev_live_in(op) = live_in(op) /// prev_live_out(op) = live_out(op) -/// live_out(op) = live_in(s_1) UNIONl ive_in(s_2) UNION live_in(s_3) UNION ... +/// live_out(op) = live_in(s_1) UNION live_in(s_2) UNION live_in(s_3) UNION ... /// where s_1, s_2, s_3, ... 
are all the successors of op in the CFG.
 /// live_in(op) = use(op) UNION (live_out(op) - def(op))
 /// until prev_live_in(op) = live_in(op)
@@ -50,7 +55,7 @@ pub type InterferenceGraph =
 /// This function finally returns `live_out` because it has all the liveness information needed.
 /// `live_in` is computed because it is needed to compute `live_out` iteratively.
 ///
-pub(crate) fn liveness_analysis(ops: &[RealizedOp]) -> HashMap<usize, BTreeSet<VirtualRegister>> {
+pub(crate) fn liveness_analysis(ops: &[Op]) -> HashMap<usize, BTreeSet<VirtualRegister>> {
     // Hash maps that will represent the live_in and live_out tables. The key of each hash map is
     // simply the index of each instruction in the `ops` vector.
     let mut live_in: HashMap<usize, BTreeSet<VirtualRegister>> =
@@ -58,10 +63,6 @@ pub(crate) fn liveness_analysis(ops: &[Op]) -> HashMap<usize, BTreeSet<VirtualR
     let mut live_out: HashMap<usize, BTreeSet<VirtualRegister>> =
         HashMap::from_iter((0..ops.len()).into_iter().map(|idx| (idx, BTreeSet::new())));
 
-    // Simple mapping between the actual offset of an instruction and its index in the `ops`
-    // vector.
-    let offset_to_ix = HashMap::from_iter(ops.iter().enumerate().map(|(idx, op)| (op.offset, idx)));
-
     let mut modified = true;
     while modified {
         modified = false;
@@ -71,8 +72,8 @@ pub(crate) fn liveness_analysis(ops: &[RealizedOp]) -> HashMap<usize, BTreeSet<
 pub(crate) fn create_interference_graph(
-    ops: &[RealizedOp],
+    ops: &[Op],
     live_out: &HashMap<usize, BTreeSet<VirtualRegister>>,
 ) -> (InterferenceGraph, HashMap<VirtualRegister, NodeIndex>) {
     let mut interference_graph = InterferenceGraph::with_capacity(0, 0);
@@ -142,7 +143,7 @@ pub(crate) fn create_interference_graph(
     // Get all virtual registers used by the intermediate assembly and add them to the graph
     ops.iter()
         .fold(BTreeSet::new(), |mut tree, elem| {
-            let mut regs = elem.opcode.registers();
+            let mut regs = elem.registers();
             regs.retain(|&reg| matches!(reg, VirtualRegister::Virtual(_)));
             tree.extend(regs.into_iter());
             tree
@@ -154,7 +155,7 @@ pub(crate) fn create_interference_graph(
     for (ix, regs) in live_out {
         match &ops[*ix].opcode {
-            VirtualOp::MOVE(v, c) => {
+            Either::Left(VirtualOp::MOVE(v, c)) => {
                 if let Some(ix1) = reg_to_node_map.get(v) {
                     for b in regs.iter() {
                         if let Some(ix2) = reg_to_node_map.get(b) {
@@ -169,7 +170,7 @@ pub(crate) fn create_interference_graph(
             }
         }
         _ => {
-            for v in &ops[*ix].opcode.def_registers() {
+            for v in &ops[*ix].def_registers() {
                 if let Some(ix1) = reg_to_node_map.get(v) {
                     for b in regs.iter() {
                         if let Some(ix2) = reg_to_node_map.get(b) {
@@ -202,33 +203,21 @@ pub(crate) fn create_interference_graph(
 /// `jnzi for now).
 ///
 pub(crate) fn coalesce_registers(
-    ops: &[RealizedOp],
+    ops: &[Op],
     interference_graph: &mut InterferenceGraph,
     reg_to_node_map: &mut HashMap<VirtualRegister, NodeIndex>,
     register_sequencer: &mut RegisterSequencer,
-) -> Vec<RealizedOp> {
+) -> Vec<Op> {
     // A map from the virtual registers that are removed to the virtual registers that they are
     // replaced with during the coalescing process.
     let mut reg_to_reg_map: HashMap<VirtualRegister, VirtualRegister> = HashMap::new();
 
     // To hold the final *reduced* list of ops
-    let mut reduced_ops: Vec<RealizedOp> = vec![];
-
-    // To figure out a mapping between the old offset and the new offset for each instruction. Will
-    // help determine the new "immediate values" for jump instructions.
-    let mut offset_map: HashMap<u64, u64> = HashMap::new();
-    let mut num_moves_removed = 0;
+    let mut reduced_ops: Vec<Op> = vec![];
 
     for op in ops {
-        let new_op = RealizedOp {
-            opcode: op.opcode.clone(),
-            owning_span: op.owning_span.clone(),
-            comment: op.comment.clone(),
-            offset: op.offset - num_moves_removed,
-        };
-        offset_map.insert(op.offset, op.offset - num_moves_removed);
         match &op.opcode {
-            VirtualOp::MOVE(x, y) => {
+            Either::Left(VirtualOp::MOVE(x, y)) => {
                 match (x, y) {
                     (VirtualRegister::Virtual(_), VirtualRegister::Virtual(_)) => {
                         // Use reg_to_reg_map to figure out what x and y have been replaced
@@ -253,7 +242,6 @@ pub(crate) fn coalesce_registers(
                         // If r1 and r2 are the same, the MOVE instruction can be safely removed,
                         // i.e., not added to reduced_ops
                         if r1 == r2 {
-                            num_moves_removed += 1;
                             continue;
                         }
 
@@ -261,7 +249,7 @@ pub(crate) fn coalesce_registers(
                         // respective liveness ranges overlap), preserve the MOVE instruction by
                         // adding it to reduced_ops
                         if interference_graph.contains_edge(*ix1, *ix2) {
-                            reduced_ops.push(new_op);
+                            reduced_ops.push(op.clone());
                             continue;
                         }
 
@@ -302,28 +290,21 @@ pub(crate) fn coalesce_registers(
                         reg_to_node_map.insert(r2.clone(), new_ix);
                         reg_to_reg_map.insert(r1.clone(), new_reg.clone());
                         reg_to_reg_map.insert(r2.clone(), new_reg.clone());
-
-                        num_moves_removed += 1;
                     }
                     _ => {
                         // Preserve the MOVE instruction if either register used in the MOVE is a
                         // special register (i.e. *not* a VirtualRegister::Virtual(_))
-                        reduced_ops.push(new_op);
+                        reduced_ops.push(op.clone());
                     }
                 }
             }
             _ => {
                 // Preserve all other instructions
-                reduced_ops.push(new_op);
+                reduced_ops.push(op.clone());
             }
         }
     }
 
-    // Update immediate values for jump instructions using offset_map
-    for new_op in &mut reduced_ops {
-        new_op.opcode = new_op.opcode.update_jump_immediate_values(&offset_map);
-    }
-
     // Create a *final* reg-to-reg map. We keep looking for mappings within reg_to_reg_map
     // until we find a register that doesn't map to any other.
     let mut final_reg_to_reg_map: HashMap<VirtualRegister, VirtualRegister> = HashMap::new();
@@ -337,7 +318,7 @@ pub(crate) fn coalesce_registers(
     // Update the registers for all instructions using final_reg_to_reg_map
     for new_op in &mut reduced_ops {
-        new_op.opcode = new_op.opcode.update_register(&final_reg_to_reg_map);
+        *new_op = new_op.update_register(&final_reg_to_reg_map);
    }
 
     reduced_ops
@@ -409,9 +390,9 @@ pub(crate) fn assign_registers(
         } else {
             // Error out for now if no available register is found
             unimplemented!(
-                "The allocator cannot resolve a register mapping for this program.
-                 This is a temporary artifact of the extremely early stage version of this language.
-                 Try to lower the number of variables you use."
+                "The allocator cannot resolve a register mapping for this program. \
+                 This is a temporary artifact of the extremely early stage version \
+                 of this language. Try to lower the number of variables you use."
             );
         }
     }
diff --git a/sway-core/src/asm_lang/allocated_ops.rs b/sway-core/src/asm_lang/allocated_ops.rs
index 44c9fac1cb3..610297e97d3 100644
--- a/sway-core/src/asm_lang/allocated_ops.rs
+++ b/sway-core/src/asm_lang/allocated_ops.rs
@@ -21,7 +21,7 @@ const COMMENT_START_COLUMN: usize = 30;
 
 /// Represents registers that have gone through register allocation. The value in the [Allocated]
 /// variant is guaranteed to be between 0 and [compiler_constants::NUM_ALLOCATABLE_REGISTERS].
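[Editorial note: stepping back to the register allocator for a moment. The fixed-point computation described in the `liveness_analysis` doc comment above, as a self-contained sketch; plain `u32` register ids and callbacks stand in for the real `Op`/`VirtualRegister` types. The allocated_ops.rs changes resume below.]

```rust
use std::collections::{BTreeSet, HashMap};

// `uses`, `defs` and `succs` describe one instruction each, by index.
fn liveness(
    n_ops: usize,
    uses: &dyn Fn(usize) -> BTreeSet<u32>,
    defs: &dyn Fn(usize) -> BTreeSet<u32>,
    succs: &dyn Fn(usize) -> Vec<usize>,
) -> HashMap<usize, BTreeSet<u32>> {
    let mut live_in = vec![BTreeSet::new(); n_ops];
    let mut live_out = vec![BTreeSet::new(); n_ops];
    let mut modified = true;
    while modified {
        modified = false;
        // Reverse order converges faster; correctness only needs a fixed point.
        for i in (0..n_ops).rev() {
            // live_out(op) = UNION of live_in over op's successors.
            let mut out = BTreeSet::new();
            for s in succs(i) {
                out.extend(live_in[s].iter().cloned());
            }
            // live_in(op) = use(op) UNION (live_out(op) - def(op)).
            let mut inn = uses(i);
            inn.extend(out.difference(&defs(i)).cloned());
            if out != live_out[i] || inn != live_in[i] {
                live_out[i] = out;
                live_in[i] = inn;
                modified = true;
            }
        }
    }
    // Like the real pass, return live_out keyed by instruction index.
    live_out.into_iter().enumerate().collect()
}
```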
-#[derive(Hash, PartialEq, Eq, Debug, Clone)] +#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Debug, Clone)] pub enum AllocatedRegister { Allocated(u8), Constant(super::ConstantRegister), @@ -92,6 +92,7 @@ pub(crate) enum AllocatedOpcode { SUBI(AllocatedRegister, AllocatedRegister, VirtualImmediate12), XOR(AllocatedRegister, AllocatedRegister, AllocatedRegister), XORI(AllocatedRegister, AllocatedRegister, VirtualImmediate12), + JMP(AllocatedRegister), JI(VirtualImmediate24), JNEI(AllocatedRegister, AllocatedRegister, VirtualImmediate12), JNZI(AllocatedRegister, VirtualImmediate18), @@ -172,6 +173,192 @@ pub(crate) enum AllocatedOpcode { DataSectionRegisterLoadPlaceholder, } +impl AllocatedOpcode { + pub(crate) fn def_registers(&self) -> BTreeSet<&AllocatedRegister> { + use AllocatedOpcode::*; + (match self { + ADD(r1, _r2, _r3) => vec![r1], + ADDI(r1, _r2, _i) => vec![r1], + AND(r1, _r2, _r3) => vec![r1], + ANDI(r1, _r2, _i) => vec![r1], + DIV(r1, _r2, _r3) => vec![r1], + DIVI(r1, _r2, _i) => vec![r1], + EQ(r1, _r2, _r3) => vec![r1], + EXP(r1, _r2, _r3) => vec![r1], + EXPI(r1, _r2, _i) => vec![r1], + GT(r1, _r2, _r3) => vec![r1], + GTF(r1, _r2, _i) => vec![r1], + LT(r1, _r2, _r3) => vec![r1], + MLOG(r1, _r2, _r3) => vec![r1], + MROO(r1, _r2, _r3) => vec![r1], + MOD(r1, _r2, _r3) => vec![r1], + MODI(r1, _r2, _i) => vec![r1], + MOVE(r1, _r2) => vec![r1], + MOVI(r1, _i) => vec![r1], + MUL(r1, _r2, _r3) => vec![r1], + MULI(r1, _r2, _i) => vec![r1], + NOT(r1, _r2) => vec![r1], + OR(r1, _r2, _r3) => vec![r1], + ORI(r1, _r2, _i) => vec![r1], + SLL(r1, _r2, _r3) => vec![r1], + SLLI(r1, _r2, _i) => vec![r1], + SMO(_r1, _r2, _r3, _r4) => vec![], + SRL(r1, _r2, _r3) => vec![r1], + SRLI(r1, _r2, _i) => vec![r1], + SUB(r1, _r2, _r3) => vec![r1], + SUBI(r1, _r2, _i) => vec![r1], + XOR(r1, _r2, _r3) => vec![r1], + XORI(r1, _r2, _i) => vec![r1], + JMP(_r1) => vec![], + JI(_im) => vec![], + JNEI(_r1, _r2, _i) => vec![], + JNZI(_r1, _i) => vec![], + RET(_r1) => vec![], + RETD(_r1, _r2) => vec![], + CFEI(_imm) => vec![], + CFSI(_imm) => vec![], + LB(r1, _r2, _i) => vec![r1], + LWDataId(r1, _i) => vec![r1], + LW(r1, _r2, _i) => vec![r1], + ALOC(_r1) => vec![], + MCL(_r1, _r2) => vec![], + MCLI(_r1, _imm) => vec![], + MCP(_r1, _r2, _r3) => vec![], + MEQ(r1, _r2, _r3, _r4) => vec![r1], + MCPI(_r1, _r2, _imm) => vec![], + SB(_r1, _r2, _i) => vec![], + SW(_r1, _r2, _i) => vec![], + BAL(r1, _r2, _r3) => vec![r1], + BHSH(_r1, _r2) => vec![], + BHEI(r1) => vec![r1], + BURN(_r1) => vec![], + CALL(_r1, _r2, _r3, _r4) => vec![], + CCP(_r1, _r2, _r3, _r4) => vec![], + CROO(_r1, _r2) => vec![], + CSIZ(r1, _r2) => vec![r1], + CB(_r1) => vec![], + LDC(_r1, _r2, _r3) => vec![], + LOG(_r1, _r2, _r3, _r4) => vec![], + LOGD(_r1, _r2, _r3, _r4) => vec![], + MINT(_r1) => vec![], + RVRT(_r1) => vec![], + SRW(r1, _r2) => vec![r1], + SRWQ(_r1, _r2) => vec![], + SWW(_r1, _r2) => vec![], + SWWQ(_r1, _r2) => vec![], + TIME(r1, _r2) => vec![r1], + TR(_r1, _r2, _r3) => vec![], + TRO(_r1, _r2, _r3, _r4) => vec![], + ECR(_r1, _r2, _r3) => vec![], + K256(_r1, _r2, _r3) => vec![], + S256(_r1, _r2, _r3) => vec![], + NOOP => vec![], + FLAG(_r1) => vec![], + GM(r1, _imm) => vec![r1], + Undefined | DataSectionOffsetPlaceholder => vec![], + DataSectionRegisterLoadPlaceholder => vec![&AllocatedRegister::Constant( + ConstantRegister::DataSectionStart, + )], + }) + .into_iter() + .collect() + } +} + +impl fmt::Display for AllocatedOpcode { + fn fmt(&self, fmtr: &mut fmt::Formatter<'_>) -> fmt::Result { + use AllocatedOpcode::*; + match self { + 
ADD(a, b, c) => write!(fmtr, "add {} {} {}", a, b, c), + ADDI(a, b, c) => write!(fmtr, "addi {} {} {}", a, b, c), + AND(a, b, c) => write!(fmtr, "and {} {} {}", a, b, c), + ANDI(a, b, c) => write!(fmtr, "andi {} {} {}", a, b, c), + DIV(a, b, c) => write!(fmtr, "div {} {} {}", a, b, c), + DIVI(a, b, c) => write!(fmtr, "divi {} {} {}", a, b, c), + EQ(a, b, c) => write!(fmtr, "eq {} {} {}", a, b, c), + EXP(a, b, c) => write!(fmtr, "exp {} {} {}", a, b, c), + EXPI(a, b, c) => write!(fmtr, "expi {} {} {}", a, b, c), + GT(a, b, c) => write!(fmtr, "gt {} {} {}", a, b, c), + GTF(a, b, c) => write!(fmtr, "gtf {} {} {}", a, b, c), + LT(a, b, c) => write!(fmtr, "lt {} {} {}", a, b, c), + MLOG(a, b, c) => write!(fmtr, "mlog {} {} {}", a, b, c), + MROO(a, b, c) => write!(fmtr, "mroo {} {} {}", a, b, c), + MOD(a, b, c) => write!(fmtr, "mod {} {} {}", a, b, c), + MODI(a, b, c) => write!(fmtr, "modi {} {} {}", a, b, c), + MOVE(a, b) => write!(fmtr, "move {} {}", a, b), + MOVI(a, b) => write!(fmtr, "movi {} {}", a, b), + MUL(a, b, c) => write!(fmtr, "mul {} {} {}", a, b, c), + MULI(a, b, c) => write!(fmtr, "muli {} {} {}", a, b, c), + NOT(a, b) => write!(fmtr, "not {} {}", a, b), + OR(a, b, c) => write!(fmtr, "or {} {} {}", a, b, c), + ORI(a, b, c) => write!(fmtr, "ori {} {} {}", a, b, c), + SLL(a, b, c) => write!(fmtr, "sll {} {} {}", a, b, c), + SLLI(a, b, c) => write!(fmtr, "slli {} {} {}", a, b, c), + SMO(a, b, c, d) => write!(fmtr, "smo {} {} {} {}", a, b, c, d), + SRL(a, b, c) => write!(fmtr, "srl {} {} {}", a, b, c), + SRLI(a, b, c) => write!(fmtr, "srli {} {} {}", a, b, c), + SUB(a, b, c) => write!(fmtr, "sub {} {} {}", a, b, c), + SUBI(a, b, c) => write!(fmtr, "subi {} {} {}", a, b, c), + XOR(a, b, c) => write!(fmtr, "xor {} {} {}", a, b, c), + XORI(a, b, c) => write!(fmtr, "xori {} {} {}", a, b, c), + JMP(a) => write!(fmtr, "jmp {}", a), + JI(a) => write!(fmtr, "ji {}", a), + JNEI(a, b, c) => write!(fmtr, "jnei {} {} {}", a, b, c), + JNZI(a, b) => write!(fmtr, "jnzi {} {}", a, b), + RET(a) => write!(fmtr, "ret {}", a), + RETD(a, b) => write!(fmtr, "retd {} {}", a, b), + CFEI(a) => write!(fmtr, "cfei {}", a), + CFSI(a) => write!(fmtr, "cfsi {}", a), + LB(a, b, c) => write!(fmtr, "lb {} {} {}", a, b, c), + LWDataId(a, b) => write!(fmtr, "lw {} {}", a, b), + LW(a, b, c) => write!(fmtr, "lw {} {} {}", a, b, c), + ALOC(a) => write!(fmtr, "aloc {}", a), + MCL(a, b) => write!(fmtr, "mcl {} {}", a, b), + MCLI(a, b) => write!(fmtr, "mcli {} {}", a, b), + MCP(a, b, c) => write!(fmtr, "mcp {} {} {}", a, b, c), + MCPI(a, b, c) => write!(fmtr, "mcpi {} {} {}", a, b, c), + MEQ(a, b, c, d) => write!(fmtr, "meq {} {} {} {}", a, b, c, d), + SB(a, b, c) => write!(fmtr, "sb {} {} {}", a, b, c), + SW(a, b, c) => write!(fmtr, "sw {} {} {}", a, b, c), + BAL(a, b, c) => write!(fmtr, "bal {} {} {}", a, b, c), + BHSH(a, b) => write!(fmtr, "bhsh {} {}", a, b), + BHEI(a) => write!(fmtr, "bhei {}", a), + BURN(a) => write!(fmtr, "burn {}", a), + CALL(a, b, c, d) => write!(fmtr, "call {} {} {} {}", a, b, c, d), + CCP(a, b, c, d) => write!(fmtr, "ccp {} {} {} {}", a, b, c, d), + CROO(a, b) => write!(fmtr, "croo {} {}", a, b), + CSIZ(a, b) => write!(fmtr, "csiz {} {}", a, b), + CB(a) => write!(fmtr, "cb {}", a), + LDC(a, b, c) => write!(fmtr, "ldc {} {} {}", a, b, c), + LOG(a, b, c, d) => write!(fmtr, "log {} {} {} {}", a, b, c, d), + LOGD(a, b, c, d) => write!(fmtr, "logd {} {} {} {}", a, b, c, d), + MINT(a) => write!(fmtr, "mint {}", a), + RVRT(a) => write!(fmtr, "rvrt {}", a), + SRW(a, b) => write!(fmtr, "srw {} {}", a, b), 
+ SRWQ(a, b) => write!(fmtr, "srwq {} {}", a, b), + SWW(a, b) => write!(fmtr, "sww {} {}", a, b), + SWWQ(a, b) => write!(fmtr, "swwq {} {}", a, b), + TIME(a, b) => write!(fmtr, "time {} {}", a, b), + TR(a, b, c) => write!(fmtr, "tr {} {} {}", a, b, c), + TRO(a, b, c, d) => write!(fmtr, "tro {} {} {} {}", a, b, c, d), + ECR(a, b, c) => write!(fmtr, "ecr {} {} {}", a, b, c), + K256(a, b, c) => write!(fmtr, "k256 {} {} {}", a, b, c), + S256(a, b, c) => write!(fmtr, "s256 {} {} {}", a, b, c), + NOOP => write!(fmtr, "noop"), + FLAG(a) => write!(fmtr, "flag {}", a), + GM(a, b) => write!(fmtr, "gm {} {}", a, b), + Undefined => write!(fmtr, "undefined op"), + DataSectionOffsetPlaceholder => { + write!( + fmtr, + "DATA_SECTION_OFFSET[0..32]\nDATA_SECTION_OFFSET[32..64]" + ) + } + DataSectionRegisterLoadPlaceholder => write!(fmtr, "lw $ds $is 1"), + } + } +} + #[derive(Clone, Debug)] pub(crate) struct AllocatedOp { pub(crate) opcode: AllocatedOpcode, @@ -182,93 +369,9 @@ pub(crate) struct AllocatedOp { impl fmt::Display for AllocatedOp { fn fmt(&self, fmtr: &mut fmt::Formatter<'_>) -> fmt::Result { - use AllocatedOpcode::*; - #[rustfmt::skip] - let string = match &self.opcode { - ADD(a, b, c) => format!("add {} {} {}", a, b, c), - ADDI(a, b, c) => format!("addi {} {} {}", a, b, c), - AND(a, b, c) => format!("and {} {} {}", a, b, c), - ANDI(a, b, c) => format!("andi {} {} {}", a, b, c), - DIV(a, b, c) => format!("div {} {} {}", a, b, c), - DIVI(a, b, c) => format!("divi {} {} {}", a, b, c), - EQ(a, b, c) => format!("eq {} {} {}", a, b, c), - EXP(a, b, c) => format!("exp {} {} {}", a, b, c), - EXPI(a, b, c) => format!("expi {} {} {}", a, b, c), - GT(a, b, c) => format!("gt {} {} {}", a, b, c), - GTF(a, b, c) => format!("gtf {} {} {}", a, b, c), - LT(a, b, c) => format!("lt {} {} {}", a, b, c), - MLOG(a, b, c) => format!("mlog {} {} {}", a, b, c), - MROO(a, b, c) => format!("mroo {} {} {}", a, b, c), - MOD(a, b, c) => format!("mod {} {} {}", a, b, c), - MODI(a, b, c) => format!("modi {} {} {}", a, b, c), - MOVE(a, b) => format!("move {} {}", a, b), - MOVI(a, b) => format!("movi {} {}", a, b), - MUL(a, b, c) => format!("mul {} {} {}", a, b, c), - MULI(a, b, c) => format!("muli {} {} {}", a, b, c), - NOT(a, b) => format!("not {} {}", a, b), - OR(a, b, c) => format!("or {} {} {}", a, b, c), - ORI(a, b, c) => format!("ori {} {} {}", a, b, c), - SLL(a, b, c) => format!("sll {} {} {}", a, b, c), - SLLI(a, b, c) => format!("slli {} {} {}", a, b, c), - SMO(a, b, c, d) => format!("smo {} {} {} {}", a, b, c, d), - SRL(a, b, c) => format!("srl {} {} {}", a, b, c), - SRLI(a, b, c) => format!("srli {} {} {}", a, b, c), - SUB(a, b, c) => format!("sub {} {} {}", a, b, c), - SUBI(a, b, c) => format!("subi {} {} {}", a, b, c), - XOR(a, b, c) => format!("xor {} {} {}", a, b, c), - XORI(a, b, c) => format!("xori {} {} {}", a, b, c), - JI(a) => format!("ji {}", a), - JNEI(a, b, c) => format!("jnei {} {} {}", a, b, c), - JNZI(a, b) => format!("jnzi {} {}", a, b), - RET(a) => format!("ret {}", a), - RETD(a, b) => format!("retd {} {}", a, b), - CFEI(a) => format!("cfei {}", a), - CFSI(a) => format!("cfsi {}", a), - LB(a, b, c) => format!("lb {} {} {}", a, b, c), - LWDataId(a, b) => format!("lw {} {}", a, b), - LW(a, b, c) => format!("lw {} {} {}", a, b, c), - ALOC(a) => format!("aloc {}", a), - MCL(a, b) => format!("mcl {} {}", a, b), - MCLI(a, b) => format!("mcli {} {}", a, b), - MCP(a, b, c) => format!("mcp {} {} {}", a, b, c), - MCPI(a, b, c) => format!("mcpi {} {} {}", a, b, c), - MEQ(a, b, c, d) => format!("meq {} {} {} 
{}", a, b, c, d), - SB(a, b, c) => format!("sb {} {} {}", a, b, c), - SW(a, b, c) => format!("sw {} {} {}", a, b, c), - BAL(a, b, c) => format!("bal {} {} {}", a, b, c), - BHSH(a, b) => format!("bhsh {} {}", a, b), - BHEI(a) => format!("bhei {}", a), - BURN(a) => format!("burn {}", a), - CALL(a, b, c, d)=> format!("call {} {} {} {}", a, b, c, d), - CCP(a, b, c, d) => format!("ccp {} {} {} {}", a, b, c, d), - CROO(a, b) => format!("croo {} {}", a, b), - CSIZ(a, b) => format!("csiz {} {}", a, b), - CB(a) => format!("cb {}", a), - LDC(a, b, c) => format!("ldc {} {} {}", a, b, c), - LOG(a, b, c, d) => format!("log {} {} {} {}", a, b, c, d), - LOGD(a, b, c, d)=> format!("logd {} {} {} {}", a, b, c, d), - MINT(a) => format!("mint {}", a), - RVRT(a) => format!("rvrt {}", a), - SRW(a, b) => format!("srw {} {}", a, b), - SRWQ(a, b) => format!("srwq {} {}", a, b), - SWW(a, b) => format!("sww {} {}", a, b), - SWWQ(a, b) => format!("swwq {} {}", a, b), - TIME(a, b) => format!("time {} {}", a, b), - TR(a, b, c) => format!("tr {} {} {}", a, b, c), - TRO(a, b, c, d) => format!("tro {} {} {} {}", a, b, c, d), - ECR(a, b, c) => format!("ecr {} {} {}", a, b, c), - K256(a, b, c) => format!("k256 {} {} {}", a, b, c), - S256(a, b, c) => format!("s256 {} {} {}", a, b, c), - NOOP => "noop".to_string(), - FLAG(a) => format!("flag {}", a), - GM(a, b) => format!("gm {} {}", a, b), - Undefined => "undefined op".into(), - DataSectionOffsetPlaceholder => "DATA_SECTION_OFFSET[0..32]\nDATA_SECTION_OFFSET[32..64]".into(), - DataSectionRegisterLoadPlaceholder => "lw $ds $is 1".into(), - }; - // we want the comment to always be COMMENT_START_COLUMN characters offset to the right - // to not interfere with the ASM but to be aligned - let mut op_and_comment = string; + // We want the comment to always be COMMENT_START_COLUMN characters offset to the right to + // not interfere with the ASM but to be aligned. + let mut op_and_comment = self.opcode.to_string(); if !self.comment.is_empty() { while op_and_comment.len() < COMMENT_START_COLUMN { op_and_comment.push(' '); @@ -323,6 +426,7 @@ impl AllocatedOp { SUBI(a, b, c) => VmOp::SUBI(a.to_register_id(), b.to_register_id(), c.value), XOR (a, b, c) => VmOp::XOR (a.to_register_id(), b.to_register_id(), c.to_register_id()), XORI(a, b, c) => VmOp::XORI(a.to_register_id(), b.to_register_id(), c.value), + JMP(a) => VmOp::JMP(a.to_register_id()), JI (a) => VmOp::JI (a.value), JNEI(a, b, c) => VmOp::JNEI(a.to_register_id(), b.to_register_id(), c.value), JNZI(a, b) => VmOp::JNZI(a.to_register_id(), b.value), @@ -396,10 +500,10 @@ fn realize_lw( // if this data is larger than a word, instead of loading the data directly // into the register, we want to load a pointer to the data into the register // this appends onto the data section and mutates it by adding the pointer as a literal - let type_of_data = data_section.type_of_data(data_id).expect( + let has_copy_type = data_section.has_copy_type(data_id).expect( "Internal miscalculation in data section -- data id did not match up to any actual data", ); - if !type_of_data.is_copy_type() { + if !has_copy_type { // load the pointer itself into the register // `offset_to_data_section` is in bytes. 
We want a byte
             // address here
diff --git a/sway-core/src/asm_lang/mod.rs b/sway-core/src/asm_lang/mod.rs
index f12e23dd9ac..c94d843f5b4 100644
--- a/sway-core/src/asm_lang/mod.rs
+++ b/sway-core/src/asm_lang/mod.rs
@@ -13,14 +13,21 @@ pub(crate) use virtual_immediate::*;
 pub(crate) use virtual_ops::*;
 pub(crate) use virtual_register::*;
 
-use crate::{asm_generation::DataId, error::*, parse_tree::AsmRegister, Ident};
+use crate::{
+    asm_generation::{DataId, RegisterPool},
+    asm_lang::allocated_ops::{AllocatedOpcode, AllocatedRegister},
+    error::*,
+    parse_tree::AsmRegister,
+    Ident,
+};
 
 use sway_types::{span::Span, Spanned};
 
 use either::Either;
 
 use std::{
-    collections::HashSet,
+    collections::{BTreeSet, HashMap},
     fmt::{self, Write},
+    hash::Hash,
 };
 
 /// The column where the ; for comments starts
@@ -32,7 +39,7 @@ impl From<&AsmRegister> for VirtualRegister {
     }
 }
 
-#[derive(Clone)]
+#[derive(Debug, Clone)]
 pub(crate) struct Op {
     pub(crate) opcode: Either<VirtualOp, OrganizationalOp>,
     /// A descriptive comment for ASM readability
@@ -40,9 +47,17 @@ pub(crate) struct Op {
     pub(crate) owning_span: Option<Span>,
 }
 
+#[derive(Clone, Debug)]
+pub(crate) struct AllocatedAbstractOp {
+    pub(crate) opcode: Either<AllocatedOpcode, ControlFlowOp<AllocatedRegister>>,
+    /// A descriptive comment for ASM readability
+    pub(crate) comment: String,
+    pub(crate) owning_span: Option<Span>,
+}
+
 #[derive(Clone, Debug)]
 pub(crate) struct RealizedOp {
-    pub(crate) opcode: VirtualOp,
+    pub(crate) opcode: AllocatedOpcode,
     /// A descriptive comment for ASM readability
     pub(crate) comment: String,
     pub(crate) owning_span: Option<Span>,
@@ -170,51 +185,31 @@ impl Op {
         }
     }
 
-    /// Moves the register in the second argument into the register in the first argument
-    pub(crate) fn register_move(
-        r1: VirtualRegister,
-        r2: VirtualRegister,
-        owning_span: Span,
-    ) -> Self {
-        Op {
-            opcode: Either::Left(VirtualOp::MOVE(r1, r2)),
-            comment: String::new(),
-            owning_span: Some(owning_span),
-        }
-    }
-
-    /// Moves the register in the second argument into the register in the first argument
-    pub(crate) fn unowned_register_move(r1: VirtualRegister, r2: VirtualRegister) -> Self {
-        Op {
-            opcode: Either::Left(VirtualOp::MOVE(r1, r2)),
-            comment: String::new(),
-            owning_span: None,
-        }
-    }
-
-    pub(crate) fn register_move_comment(
-        r1: VirtualRegister,
-        r2: VirtualRegister,
-        owning_span: Span,
+    /// Move an address at a label into a register.
+    pub(crate) fn move_address(
+        reg: VirtualRegister,
+        label: Label,
         comment: impl Into<String>,
+        owning_span: Option<Span>,
     ) -> Self {
         Op {
-            opcode: Either::Left(VirtualOp::MOVE(r1, r2)),
+            opcode: Either::Right(OrganizationalOp::MoveAddress(reg, label)),
             comment: comment.into(),
-            owning_span: Some(owning_span),
+            owning_span,
         }
     }
 
     /// Moves the register in the second argument into the register in the first argument
-    pub(crate) fn unowned_register_move_comment(
+    pub(crate) fn register_move(
         r1: VirtualRegister,
         r2: VirtualRegister,
         comment: impl Into<String>,
+        owning_span: Option<Span>,
     ) -> Self {
         Op {
             opcode: Either::Left(VirtualOp::MOVE(r1, r2)),
             comment: comment.into(),
-            owning_span: None,
+            owning_span,
         }
     }
 
@@ -264,6 +259,19 @@ impl Op {
         }
     }
 
+    /// Dynamically jumps to a register value.
+    pub(crate) fn jump_to_register(
+        reg: VirtualRegister,
+        comment: impl Into<String>,
+        owning_span: Option<Span>,
+    ) -> Self {
+        Op {
+            opcode: Either::Left(VirtualOp::JMP(reg)),
+            comment: comment.into(),
+            owning_span,
+        }
+    }
+
     pub(crate) fn parse_opcode(
         name: &Ident,
         args: &[VirtualRegister],
@@ -962,6 +970,58 @@ impl Op {
             errors,
         )
     }
+
+    pub(crate) fn registers(&self) -> BTreeSet<&VirtualRegister> {
+        match &self.opcode {
+            Either::Left(virt_op) => virt_op.registers(),
+            Either::Right(org_op) => org_op.registers(),
+        }
+    }
+
+    pub(crate) fn use_registers(&self) -> BTreeSet<&VirtualRegister> {
+        match &self.opcode {
+            Either::Left(virt_op) => virt_op.use_registers(),
+            Either::Right(org_op) => org_op.use_registers(),
+        }
+    }
+
+    pub(crate) fn def_registers(&self) -> BTreeSet<&VirtualRegister> {
+        match &self.opcode {
+            Either::Left(virt_op) => virt_op.def_registers(),
+            Either::Right(org_op) => org_op.def_registers(),
+        }
+    }
+
+    pub(crate) fn successors(&self, index: usize, ops: &[Op]) -> Vec<usize> {
+        match &self.opcode {
+            Either::Left(virt_op) => virt_op.successors(index, ops),
+            Either::Right(org_op) => org_op.successors(index, ops),
+        }
+    }
+
+    pub(crate) fn update_register(
+        &self,
+        reg_to_reg_map: &HashMap<VirtualRegister, VirtualRegister>,
+    ) -> Self {
+        Op {
+            opcode: match &self.opcode {
+                Either::Left(virt_op) => Either::Left(virt_op.update_register(reg_to_reg_map)),
+                Either::Right(org_op) => Either::Right(org_op.update_register(reg_to_reg_map)),
+            },
+            comment: self.comment.clone(),
+            owning_span: self.owning_span.clone(),
+        }
+    }
+
+    pub(crate) fn allocate_registers(
+        &self,
+        pool: &RegisterPool,
+    ) -> Either<AllocatedOpcode, ControlFlowOp<AllocatedRegister>> {
+        match &self.opcode {
+            Either::Left(virt_op) => Either::Left(virt_op.allocate_registers(pool)),
+            Either::Right(org_op) => Either::Right(org_op.allocate_registers(pool)),
+        }
+    }
 }
 
 fn single_reg(
@@ -1288,107 +1348,118 @@ fn two_regs_imm_12(
 
 impl fmt::Display for Op {
     fn fmt(&self, fmtr: &mut fmt::Formatter<'_>) -> fmt::Result {
-        use OrganizationalOp::*;
+        // We want the comment to always be 40 characters offset to the right to not interfere with
+        // the ASM but to be aligned.
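[Editorial note: the padding loop itself follows. The alignment rule shared by this impl and the one in allocated_ops.rs, in isolation; `align_comment` is a hypothetical helper, and the column constant differs per file (COMMENT_START_COLUMN is 30 in allocated_ops.rs).]

```rust
// Pad the opcode text out to a fixed column, then append "; comment".
fn align_comment(opcode: &str, comment: &str, column: usize) -> String {
    let mut out = opcode.to_string();
    if !comment.is_empty() {
        while out.len() < column {
            out.push(' ');
        }
        out.push_str("; ");
        out.push_str(comment);
    }
    out
}

// For example, align_comment("move $r1 $r2", "copy arg", 30) yields
// "move $r1 $r2                  ; copy arg".
```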
+ let mut op_and_comment = self.opcode.to_string(); + if !self.comment.is_empty() { + while op_and_comment.len() < COMMENT_START_COLUMN { + op_and_comment.push(' '); + } + write!(op_and_comment, "; {}", self.comment)?; + } + + write!(fmtr, "{}", op_and_comment) + } +} + +impl fmt::Display for VirtualOp { + fn fmt(&self, fmtr: &mut fmt::Formatter<'_>) -> fmt::Result { use VirtualOp::*; - let op_str = match &self.opcode { - Either::Left(opcode) => match opcode { - ADD(a, b, c) => format!("add {} {} {}", a, b, c), - ADDI(a, b, c) => format!("addi {} {} {}", a, b, c), - AND(a, b, c) => format!("and {} {} {}", a, b, c), - ANDI(a, b, c) => format!("andi {} {} {}", a, b, c), - DIV(a, b, c) => format!("div {} {} {}", a, b, c), - DIVI(a, b, c) => format!("divi {} {} {}", a, b, c), - EQ(a, b, c) => format!("eq {} {} {}", a, b, c), - EXP(a, b, c) => format!("exp {} {} {}", a, b, c), - EXPI(a, b, c) => format!("expi {} {} {}", a, b, c), - GT(a, b, c) => format!("gt {} {} {}", a, b, c), - GTF(a, b, c) => format!("gtf {} {} {}", a, b, c), - LT(a, b, c) => format!("lt {} {} {}", a, b, c), - MLOG(a, b, c) => format!("mlog {} {} {}", a, b, c), - MROO(a, b, c) => format!("mroo {} {} {}", a, b, c), - MOD(a, b, c) => format!("mod {} {} {}", a, b, c), - MODI(a, b, c) => format!("modi {} {} {}", a, b, c), - MOVE(a, b) => format!("move {} {}", a, b), - MOVI(a, b) => format!("movi {} {}", a, b), - MUL(a, b, c) => format!("mul {} {} {}", a, b, c), - MULI(a, b, c) => format!("muli {} {} {}", a, b, c), - NOT(a, b) => format!("not {} {}", a, b), - OR(a, b, c) => format!("or {} {} {}", a, b, c), - ORI(a, b, c) => format!("ori {} {} {}", a, b, c), - SLL(a, b, c) => format!("sll {} {} {}", a, b, c), - SLLI(a, b, c) => format!("slli {} {} {}", a, b, c), - SMO(a, b, c, d) => format!("smo {} {} {} {}", a, b, c, d), - SRL(a, b, c) => format!("srl {} {} {}", a, b, c), - SRLI(a, b, c) => format!("srli {} {} {}", a, b, c), - SUB(a, b, c) => format!("sub {} {} {}", a, b, c), - SUBI(a, b, c) => format!("subi {} {} {}", a, b, c), - XOR(a, b, c) => format!("xor {} {} {}", a, b, c), - XORI(a, b, c) => format!("xori {} {} {}", a, b, c), - JI(a) => format!("ji {}", a), - JNEI(a, b, c) => format!("jnei {} {} {}", a, b, c), - JNZI(a, b) => format!("jnzi {} {}", a, b), - RET(a) => format!("ret {}", a), - RETD(a, b) => format!("retd {} {}", a, b), - CFEI(a) => format!("cfei {}", a), - CFSI(a) => format!("cfsi {}", a), - LB(a, b, c) => format!("lb {} {} {}", a, b, c), - LWDataId(a, b) => format!("lw {} {}", a, b), - LW(a, b, c) => format!("lw {} {} {}", a, b, c), - ALOC(a) => format!("aloc {}", a), - MCL(a, b) => format!("mcl {} {}", a, b), - MCLI(a, b) => format!("mcli {} {}", a, b), - MCP(a, b, c) => format!("mcp {} {} {}", a, b, c), - MCPI(a, b, c) => format!("mcpi {} {} {}", a, b, c), - MEQ(a, b, c, d) => format!("meq {} {} {} {}", a, b, c, d), - SB(a, b, c) => format!("sb {} {} {}", a, b, c), - SW(a, b, c) => format!("sw {} {} {}", a, b, c), - BAL(a, b, c) => format!("bal {} {} {}", a, b, c), - BHSH(a, b) => format!("bhsh {} {}", a, b), - BHEI(a) => format!("bhei {}", a), - BURN(a) => format!("burn {}", a), - CALL(a, b, c, d) => format!("call {} {} {} {}", a, b, c, d), - CCP(a, b, c, d) => format!("ccp {} {} {} {}", a, b, c, d), - CROO(a, b) => format!("croo {} {}", a, b), - CSIZ(a, b) => format!("csiz {} {}", a, b), - CB(a) => format!("cb {}", a), - LDC(a, b, c) => format!("ldc {} {} {}", a, b, c), - LOG(a, b, c, d) => format!("log {} {} {} {}", a, b, c, d), - LOGD(a, b, c, d) => format!("logd {} {} {} {}", a, b, c, d), - MINT(a) => 
format!("mint {}", a), - RVRT(a) => format!("rvrt {}", a), - SRW(a, b) => format!("srw {} {}", a, b), - SRWQ(a, b) => format!("srwq {} {}", a, b), - SWW(a, b) => format!("sww {} {}", a, b), - SWWQ(a, b) => format!("swwq {} {}", a, b), - TIME(a, b) => format!("time {} {}", a, b), - TR(a, b, c) => format!("tr {} {} {}", a, b, c), - TRO(a, b, c, d) => format!("tro {} {} {} {}", a, b, c, d), - ECR(a, b, c) => format!("ecr {} {} {}", a, b, c), - K256(a, b, c) => format!("k256 {} {} {}", a, b, c), - S256(a, b, c) => format!("s256 {} {} {}", a, b, c), - NOOP => "noop".to_string(), - FLAG(a) => format!("flag {}", a), - GM(a, b) => format!("gm {} {}", a, b), - Undefined => "undefined op".into(), - VirtualOp::DataSectionOffsetPlaceholder => "data section offset placeholder".into(), - DataSectionRegisterLoadPlaceholder => { - "data section register load placeholder".into() - } - }, - Either::Right(opcode) => match opcode { - Label(l) => format!("{}", l), - Comment => "".into(), - Jump(label) => format!("jump {}", label), - JumpIfNotEq(reg0, reg1, label) => format!("jnei {} {} {}", reg0, reg1, label), - JumpIfNotZero(reg0, label) => format!("jnzi {} {}", reg0, label), - OrganizationalOp::DataSectionOffsetPlaceholder => { - "data section offset placeholder".into() - } - }, - }; - // we want the comment to always be 40 characters offset to the right - // to not interfere with the ASM but to be aligned - let mut op_and_comment = op_str; + match self { + ADD(a, b, c) => write!(fmtr, "add {} {} {}", a, b, c), + ADDI(a, b, c) => write!(fmtr, "addi {} {} {}", a, b, c), + AND(a, b, c) => write!(fmtr, "and {} {} {}", a, b, c), + ANDI(a, b, c) => write!(fmtr, "andi {} {} {}", a, b, c), + DIV(a, b, c) => write!(fmtr, "div {} {} {}", a, b, c), + DIVI(a, b, c) => write!(fmtr, "divi {} {} {}", a, b, c), + EQ(a, b, c) => write!(fmtr, "eq {} {} {}", a, b, c), + EXP(a, b, c) => write!(fmtr, "exp {} {} {}", a, b, c), + EXPI(a, b, c) => write!(fmtr, "expi {} {} {}", a, b, c), + GT(a, b, c) => write!(fmtr, "gt {} {} {}", a, b, c), + GTF(a, b, c) => write!(fmtr, "gtf {} {} {}", a, b, c), + LT(a, b, c) => write!(fmtr, "lt {} {} {}", a, b, c), + MLOG(a, b, c) => write!(fmtr, "mlog {} {} {}", a, b, c), + MROO(a, b, c) => write!(fmtr, "mroo {} {} {}", a, b, c), + MOD(a, b, c) => write!(fmtr, "mod {} {} {}", a, b, c), + MODI(a, b, c) => write!(fmtr, "modi {} {} {}", a, b, c), + MOVE(a, b) => write!(fmtr, "move {} {}", a, b), + MOVI(a, b) => write!(fmtr, "movi {} {}", a, b), + MUL(a, b, c) => write!(fmtr, "mul {} {} {}", a, b, c), + MULI(a, b, c) => write!(fmtr, "muli {} {} {}", a, b, c), + NOT(a, b) => write!(fmtr, "not {} {}", a, b), + OR(a, b, c) => write!(fmtr, "or {} {} {}", a, b, c), + ORI(a, b, c) => write!(fmtr, "ori {} {} {}", a, b, c), + SLL(a, b, c) => write!(fmtr, "sll {} {} {}", a, b, c), + SLLI(a, b, c) => write!(fmtr, "slli {} {} {}", a, b, c), + SMO(a, b, c, d) => write!(fmtr, "smo {} {} {} {}", a, b, c, d), + SRL(a, b, c) => write!(fmtr, "srl {} {} {}", a, b, c), + SRLI(a, b, c) => write!(fmtr, "srli {} {} {}", a, b, c), + SUB(a, b, c) => write!(fmtr, "sub {} {} {}", a, b, c), + SUBI(a, b, c) => write!(fmtr, "subi {} {} {}", a, b, c), + XOR(a, b, c) => write!(fmtr, "xor {} {} {}", a, b, c), + XORI(a, b, c) => write!(fmtr, "xori {} {} {}", a, b, c), + JMP(a) => write!(fmtr, "jmp {}", a), + JI(a) => write!(fmtr, "ji {}", a), + JNEI(a, b, c) => write!(fmtr, "jnei {} {} {}", a, b, c), + JNZI(a, b) => write!(fmtr, "jnzi {} {}", a, b), + RET(a) => write!(fmtr, "ret {}", a), + RETD(a, b) => write!(fmtr, "retd {} {}", 
a, b), + CFEI(a) => write!(fmtr, "cfei {}", a), + CFSI(a) => write!(fmtr, "cfsi {}", a), + LB(a, b, c) => write!(fmtr, "lb {} {} {}", a, b, c), + LWDataId(a, b) => write!(fmtr, "lw {} {}", a, b), + LW(a, b, c) => write!(fmtr, "lw {} {} {}", a, b, c), + ALOC(a) => write!(fmtr, "aloc {}", a), + MCL(a, b) => write!(fmtr, "mcl {} {}", a, b), + MCLI(a, b) => write!(fmtr, "mcli {} {}", a, b), + MCP(a, b, c) => write!(fmtr, "mcp {} {} {}", a, b, c), + MCPI(a, b, c) => write!(fmtr, "mcpi {} {} {}", a, b, c), + MEQ(a, b, c, d) => write!(fmtr, "meq {} {} {} {}", a, b, c, d), + SB(a, b, c) => write!(fmtr, "sb {} {} {}", a, b, c), + SW(a, b, c) => write!(fmtr, "sw {} {} {}", a, b, c), + BAL(a, b, c) => write!(fmtr, "bal {} {} {}", a, b, c), + BHSH(a, b) => write!(fmtr, "bhsh {} {}", a, b), + BHEI(a) => write!(fmtr, "bhei {}", a), + BURN(a) => write!(fmtr, "burn {}", a), + CALL(a, b, c, d) => write!(fmtr, "call {} {} {} {}", a, b, c, d), + CCP(a, b, c, d) => write!(fmtr, "ccp {} {} {} {}", a, b, c, d), + CROO(a, b) => write!(fmtr, "croo {} {}", a, b), + CSIZ(a, b) => write!(fmtr, "csiz {} {}", a, b), + CB(a) => write!(fmtr, "cb {}", a), + LDC(a, b, c) => write!(fmtr, "ldc {} {} {}", a, b, c), + LOG(a, b, c, d) => write!(fmtr, "log {} {} {} {}", a, b, c, d), + LOGD(a, b, c, d) => write!(fmtr, "logd {} {} {} {}", a, b, c, d), + MINT(a) => write!(fmtr, "mint {}", a), + RVRT(a) => write!(fmtr, "rvrt {}", a), + SRW(a, b) => write!(fmtr, "srw {} {}", a, b), + SRWQ(a, b) => write!(fmtr, "srwq {} {}", a, b), + SWW(a, b) => write!(fmtr, "sww {} {}", a, b), + SWWQ(a, b) => write!(fmtr, "swwq {} {}", a, b), + TIME(a, b) => write!(fmtr, "time {} {}", a, b), + TR(a, b, c) => write!(fmtr, "tr {} {} {}", a, b, c), + TRO(a, b, c, d) => write!(fmtr, "tro {} {} {} {}", a, b, c, d), + ECR(a, b, c) => write!(fmtr, "ecr {} {} {}", a, b, c), + K256(a, b, c) => write!(fmtr, "k256 {} {} {}", a, b, c), + S256(a, b, c) => write!(fmtr, "s256 {} {} {}", a, b, c), + NOOP => Ok(()), + FLAG(a) => write!(fmtr, "flag {}", a), + GM(a, b) => write!(fmtr, "gm {} {}", a, b), + + Undefined => write!(fmtr, "undefined op"), + + DataSectionOffsetPlaceholder => write!(fmtr, "data section offset placeholder"), + DataSectionRegisterLoadPlaceholder => { + write!(fmtr, "data section register load placeholder") + } + } + } +} + +impl fmt::Display for AllocatedAbstractOp { + fn fmt(&self, fmtr: &mut fmt::Formatter<'_>) -> fmt::Result { + // We want the comment to always be 40 characters offset to the right to not interfere with + // the ASM but to be aligned. 
+        let mut op_and_comment = self.opcode.to_string();
         if !self.comment.is_empty() {
             while op_and_comment.len() < COMMENT_START_COLUMN {
                 op_and_comment.push(' ');
             }
@@ -1402,8 +1473,8 @@ impl fmt::Display for Op {
 
 // Convenience opcodes for the compiler -- will be optimized out or removed
 // these do not reflect actual ops in the VM and will be compiled to bytecode
-#[derive(Clone)]
-pub(crate) enum OrganizationalOp {
+#[derive(Debug, Clone)]
+pub(crate) enum ControlFlowOp<Reg> {
     // Labels the code for jumps, will later be interpreted into offsets
     Label(Label),
     // Just a comment that will be inserted into the asm without an op
@@ -1411,15 +1482,26 @@ pub(crate) enum OrganizationalOp {
     // Jumps to a label
     Jump(Label),
     // Jumps to a label if the two registers are different
-    JumpIfNotEq(VirtualRegister, VirtualRegister, Label),
+    JumpIfNotEq(Reg, Reg, Label),
     // Jumps to a label if the register is not equal to zero
-    JumpIfNotZero(VirtualRegister, Label),
+    JumpIfNotZero(Reg, Label),
+    // Jumps to a label, similarly to Jump, though semantically expecting to return.
+    Call(Label),
+    // Save a label address in a register.
+    MoveAddress(Reg, Label),
     // placeholder for the DataSection offset
     DataSectionOffsetPlaceholder,
+    // Save all currently live general purpose registers, using a label as a handle.
+    PushAll(Label),
+    // Restore all previously saved general purpose registers.
+    PopAll(Label),
 }
 
-impl fmt::Display for OrganizationalOp {
+
+pub(crate) type OrganizationalOp = ControlFlowOp<VirtualRegister>;
+
+impl<Reg: fmt::Display> fmt::Display for ControlFlowOp<Reg> {
     fn fmt(&self, fmtr: &mut fmt::Formatter<'_>) -> fmt::Result {
-        use OrganizationalOp::*;
+        use ControlFlowOp::*;
         write!(
             fmtr,
             "{}",
@@ -1429,22 +1511,182 @@ impl fmt::Display for OrganizationalOp {
                 Comment => "".into(),
                 JumpIfNotEq(r1, r2, lab) => format!("jnei {} {} {}", r1, r2, lab),
                 JumpIfNotZero(r1, lab) => format!("jnzi {} {}", r1, lab),
+                Call(lab) => format!("fncall {lab}"),
+                MoveAddress(r1, lab) => format!("mova {} {}", r1, lab),
                 DataSectionOffsetPlaceholder =>
                     "DATA SECTION OFFSET[0..32]\nDATA SECTION OFFSET[32..64]".into(),
+                PushAll(lab) => format!("pusha {lab}"),
+                PopAll(lab) => format!("popa {lab}"),
             }
         )
     }
 }
 
-impl OrganizationalOp {
-    pub(crate) fn registers(&self) -> HashSet<&VirtualRegister> {
-        use OrganizationalOp::*;
+impl<Reg: Clone + Eq + Ord + Hash> ControlFlowOp<Reg> {
+    pub(crate) fn registers(&self) -> BTreeSet<&Reg> {
+        use ControlFlowOp::*;
         (match self {
-            Label(_) | Comment | Jump(_) | DataSectionOffsetPlaceholder => vec![],
+            Label(_)
+            | Comment
+            | Jump(_)
+            | Call(_)
+            | DataSectionOffsetPlaceholder
+            | PushAll(_)
+            | PopAll(_) => vec![],
+            JumpIfNotEq(r1, r2, _) => vec![r1, r2],
+            JumpIfNotZero(r1, _) | MoveAddress(r1, _) => vec![r1],
         })
         .into_iter()
         .collect()
     }
+
+    pub(crate) fn use_registers(&self) -> BTreeSet<&Reg> {
+        use ControlFlowOp::*;
+        (match self {
+            Label(_)
+            | Comment
+            | Jump(_)
+            | Call(_)
+            | MoveAddress(..)
+            | DataSectionOffsetPlaceholder
+            | PushAll(_)
+            | PopAll(_) => vec![],
+            JumpIfNotZero(r1, _) => vec![r1],
+            JumpIfNotEq(r1, r2, _) => vec![r1, r2],
+        })
+        .into_iter()
+        .collect()
+    }
+
+    pub(crate) fn def_registers(&self) -> BTreeSet<&Reg> {
+        use ControlFlowOp::*;
+        (match self {
+            MoveAddress(reg, _) => vec![reg],
+
+            Label(_)
+            | Comment
+            | Jump(_)
+            | JumpIfNotEq(..)
+            | JumpIfNotZero(..)
+            | Call(_)
+            | DataSectionOffsetPlaceholder
+            | PushAll(_)
+            | PopAll(_) => vec![],
         })
         .into_iter()
         .collect()
     }
+
+    pub(crate) fn update_register(&self, reg_to_reg_map: &HashMap<Reg, Reg>) -> Self {
+        let update_reg = |reg: &Reg| -> Reg {
+            reg_to_reg_map
+                .get(reg)
+                .cloned()
+                .unwrap_or_else(|| reg.clone())
+        };
+
+        use ControlFlowOp::*;
+        match self {
+            Comment
+            | Label(_)
+            | Jump(_)
+            | Call(_)
+            | DataSectionOffsetPlaceholder
+            | PushAll(_)
+            | PopAll(_) => self.clone(),
+
+            JumpIfNotEq(r1, r2, label) => Self::JumpIfNotEq(update_reg(r1), update_reg(r2), *label),
+            JumpIfNotZero(r1, label) => Self::JumpIfNotZero(update_reg(r1), *label),
+            MoveAddress(r1, label) => Self::MoveAddress(update_reg(r1), *label),
+        }
+    }
+
+    pub(crate) fn successors(&self, index: usize, ops: &[Op]) -> Vec<usize> {
+        use ControlFlowOp::*;
+
+        let mut next_ops = Vec::new();
+
+        if index + 1 < ops.len() && !matches!(self, Jump(_)) {
+            next_ops.push(index + 1);
+        };
+
+        match self {
+            Label(_)
+            | Comment
+            | Call(_)
+            | MoveAddress(..)
+            | DataSectionOffsetPlaceholder
+            | PushAll(_)
+            | PopAll(_) => (),
+
+            Jump(jump_label) | JumpIfNotEq(_, _, jump_label) | JumpIfNotZero(_, jump_label) => {
+                // Find the label in the ops list.
+                for (idx, op) in ops.iter().enumerate() {
+                    if let Either::Right(ControlFlowOp::Label(op_label)) = op.opcode {
+                        if op_label == *jump_label {
+                            next_ops.push(idx);
+                            break;
+                        }
+                    }
+                }
+            }
+        };
+
+        next_ops
+    }
+}
+
+impl ControlFlowOp<VirtualRegister> {
+    // Copied directly from VirtualOp::allocate_registers().
+    pub(crate) fn allocate_registers(
+        &self,
+        pool: &RegisterPool,
+    ) -> ControlFlowOp<AllocatedRegister> {
+        let virtual_registers = self.registers();
+        let register_allocation_result = virtual_registers
+            .clone()
+            .into_iter()
+            .map(|x| match x {
+                VirtualRegister::Constant(c) => (x, Some(AllocatedRegister::Constant(*c))),
+                VirtualRegister::Virtual(_) => (x, pool.get_register(x)),
+            })
+            .map(|(x, register_opt)| register_opt.map(|register| (x, register)))
+            .collect::<Option<Vec<_>>>();
+
+        // Maps virtual registers to their allocated equivalent
+        let mut mapping: HashMap<&VirtualRegister, AllocatedRegister> = HashMap::default();
+        match register_allocation_result {
+            Some(o) => {
+                for (key, val) in o {
+                    mapping.insert(key, val);
+                }
+            }
+            None => {
+                unimplemented!(
+                    "The allocator cannot resolve a register mapping for this program.
+                     This is a temporary artifact of the extremely early stage version of this language.
+                     Try to lower the number of variables you use."
+                );
+            }
+        };
+
+        let map_reg = |reg: &VirtualRegister| mapping.get(reg).unwrap().clone();
+
+        use ControlFlowOp::*;
+        match self {
+            Label(label) => Label(*label),
+            Comment => Comment,
+            Jump(label) => Jump(*label),
+            Call(label) => Call(*label),
+            DataSectionOffsetPlaceholder => DataSectionOffsetPlaceholder,
+            PushAll(label) => PushAll(*label),
+            PopAll(label) => PopAll(*label),
+
+            JumpIfNotEq(r1, r2, label) => JumpIfNotEq(map_reg(r1), map_reg(r2), *label),
+            JumpIfNotZero(r1, label) => JumpIfNotZero(map_reg(r1), *label),
+            MoveAddress(r1, label) => MoveAddress(map_reg(r1), *label),
+        }
+    }
 }
diff --git a/sway-core/src/asm_lang/virtual_ops.rs b/sway-core/src/asm_lang/virtual_ops.rs
index 4085f17a6d6..9bb976a02b1 100644
--- a/sway-core/src/asm_lang/virtual_ops.rs
+++ b/sway-core/src/asm_lang/virtual_ops.rs
@@ -8,7 +8,7 @@ use super::{
     allocated_ops::{AllocatedOpcode, AllocatedRegister},
     virtual_immediate::*,
     virtual_register::*,
-    DataId, RealizedOp,
+    DataId, Op,
 };
 
 use crate::asm_generation::RegisterPool;
@@ -60,6 +60,7 @@ pub(crate) enum VirtualOp {
     SUBI(VirtualRegister, VirtualRegister, VirtualImmediate12),
     XOR(VirtualRegister, VirtualRegister, VirtualRegister),
     XORI(VirtualRegister, VirtualRegister, VirtualImmediate12),
+    JMP(VirtualRegister),
     JI(VirtualImmediate24),
     JNEI(VirtualRegister, VirtualRegister, VirtualImmediate12),
     JNZI(VirtualRegister, VirtualImmediate18),
@@ -182,6 +183,7 @@ impl VirtualOp {
             SUBI(r1, r2, _i) => vec![r1, r2],
             XOR(r1, r2, r3) => vec![r1, r2, r3],
             XORI(r1, r2, _i) => vec![r1, r2],
+            JMP(r1) => vec![r1],
             JI(_im) => vec![],
             JNEI(r1, r2, _i) => vec![r1, r2],
             JNZI(r1, _i) => vec![r1],
@@ -273,6 +275,7 @@ impl VirtualOp {
             SUBI(_r1, r2, _i) => vec![r2],
             XOR(_r1, r2, r3) => vec![r2, r3],
             XORI(_r1, r2, _i) => vec![r2],
+            JMP(r1) => vec![r1],
             JI(_im) => vec![],
             JNEI(r1, r2, _i) => vec![r1, r2],
             JNZI(r1, _i) => vec![r1],
@@ -364,6 +367,7 @@ impl VirtualOp {
             SUBI(r1, _r2, _i) => vec![r1],
             XOR(r1, _r2, _r3) => vec![r1],
             XORI(r1, _r2, _i) => vec![r1],
+            JMP(_r1) => vec![],
             JI(_im) => vec![],
             JNEI(_r1, _r2, _i) => vec![],
             JNZI(_r1, _i) => vec![],
@@ -422,12 +426,7 @@ impl VirtualOp {
     /// instructions `ops`. For most instructions, the successor is simply the next instruction in
     /// `ops`. The exceptions are jump instructions that can have arbitrary successors and RVRT
     /// which does not have any successors.
-    pub(crate) fn successors(
-        &self,
-        index: usize,
-        ops: &[RealizedOp],
-        offset_to_ix: &HashMap<u64, usize>,
-    ) -> Vec<usize> {
+    pub(crate) fn successors(&self, index: usize, ops: &[Op]) -> Vec<usize> {
         use VirtualOp::*;
         let next_op = if index >= ops.len() - 1 {
             vec![]
@@ -436,47 +435,16 @@ impl VirtualOp {
         };
         match self {
             RVRT(_) => vec![],
-            JI(i) => {
-                // Single successor indicated in the jump offset. Use `offset_to_ix` to figure out
-                // the index in `ops` that corresponds to the offset specified.
-                if *offset_to_ix.get(&(i.value as u64)).unwrap() >= ops.len() {
-                    vec![]
-                } else {
-                    vec![*offset_to_ix.get(&(i.value as u64)).unwrap()]
-                }
-            }
-            JNEI(_, _, i) => {
-                // Two possible successors: the next instruction as well as the instruction
-                // indicated in the jump offset. Use `offset_to_ix` to figure out the index in
-                // `ops` that corresponds to the offset specified.
-                if *offset_to_ix.get(&(i.value as u64)).unwrap() >= ops.len() {
-                    vec![].into_iter().chain(next_op.into_iter()).collect()
-                } else {
-                    vec![*offset_to_ix.get(&(i.value as u64)).unwrap()]
-                        .into_iter()
-                        .chain(next_op.into_iter())
-                        .collect()
-                }
-            }
-            JNZI(_, i) => {
-                // Two possible successors: the next instruction as well as the instruction
-                // indicated in the jump offset. Use `offset_to_ix` to figure out the index in
-                // `ops` that corresponds to the offset specified.
-                if *offset_to_ix.get(&(i.value as u64)).unwrap() >= ops.len() {
-                    vec![].into_iter().chain(next_op.into_iter()).collect()
-                } else {
-                    vec![*offset_to_ix.get(&(i.value as u64)).unwrap()]
-                        .into_iter()
-                        .chain(next_op.into_iter())
-                        .collect()
-                }
+            JI(_) | JNEI(..) | JNZI(..) => {
+                unreachable!("At this stage we shouldn't have jumps in the code.")
             }
+
             _ => next_op,
         }
     }
 
     pub(crate) fn update_register(
-        &mut self,
+        &self,
         reg_to_reg_map: &HashMap<VirtualRegister, VirtualRegister>,
     ) -> Self {
         use VirtualOp::*;
@@ -636,6 +604,7 @@ impl VirtualOp {
                 update_reg(reg_to_reg_map, r2),
                 i.clone(),
             ),
+            JMP(r1) => Self::JMP(update_reg(reg_to_reg_map, r1)),
             JI(_) => self.clone(),
             JNEI(r1, r2, i) => Self::JNEI(
                 update_reg(reg_to_reg_map, r1),
@@ -845,7 +814,7 @@ impl VirtualOp {
             .clone()
             .into_iter()
             .map(|x| match x {
-                VirtualRegister::Constant(c) => (x, Some(AllocatedRegister::Constant(c.clone()))),
+                VirtualRegister::Constant(c) => (x, Some(AllocatedRegister::Constant(*c))),
                 VirtualRegister::Virtual(_) => (x, pool.get_register(x)),
             })
             .map(|(x, register_opt)| register_opt.map(|register| (x, register)))
@@ -1023,6 +992,7 @@ impl VirtualOp {
                 map_reg(&mapping, reg2),
                 imm.clone(),
             ),
+            JMP(reg1) => AllocatedOpcode::JMP(map_reg(&mapping, reg1)),
             JI(imm) => AllocatedOpcode::JI(imm.clone()),
             JNEI(reg1, reg2, imm) => AllocatedOpcode::JNEI(
                 map_reg(&mapping, reg1),
@@ -1200,7 +1170,7 @@ fn update_reg(
     }
 }
 
-#[derive(Clone, Eq, PartialEq, Hash)]
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash)]
 /// A label for a spot in the bytecode, to be later compiled to an offset.
 pub(crate) struct Label(pub(crate) usize);
 impl fmt::Display for Label {
diff --git a/sway-core/src/asm_lang/virtual_register.rs b/sway-core/src/asm_lang/virtual_register.rs
index 0b2fad8fb60..df20fabbce9 100644
--- a/sway-core/src/asm_lang/virtual_register.rs
+++ b/sway-core/src/asm_lang/virtual_register.rs
@@ -26,7 +26,7 @@ impl fmt::Display for VirtualRegister {
     }
 }
 
-#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Debug, Clone)]
+#[derive(Hash, PartialEq, Eq, PartialOrd, Ord, Debug, Clone, Copy)]
/// These are the special registers defined in the spec
 pub enum ConstantRegister {
     // Below are VM-reserved registers
@@ -46,12 +46,26 @@ pub enum ConstantRegister {
     ReturnValue,
     ReturnLength,
     Flags,
+
     // Below are compiler-reserved registers
     DataSectionStart,
+    CallReturnAddress,
+    CallReturnValue,
+    Scratch,
+
+    // Registers for the first NUM_ARG_REGISTERS function arguments.
+ FuncArg0, + FuncArg1, + FuncArg2, + FuncArg3, + FuncArg4, + FuncArg5, } +use crate::asm_generation::compiler_constants; + impl ConstantRegister { - pub(crate) fn to_register_id(&self) -> fuel_asm::RegisterId { + pub(crate) fn to_register_id(self) -> fuel_asm::RegisterId { use fuel_vm::consts::*; use ConstantRegister::*; match self { @@ -71,12 +85,31 @@ impl ConstantRegister { ReturnValue => REG_RET, ReturnLength => REG_RETL, Flags => REG_FLAG, - DataSectionStart => { - (crate::asm_generation::compiler_constants::DATA_SECTION_REGISTER) - as fuel_asm::RegisterId + + DataSectionStart => (compiler_constants::DATA_SECTION_REGISTER) as fuel_asm::RegisterId, + CallReturnAddress => { + (compiler_constants::RETURN_ADDRESS_REGISTER) as fuel_asm::RegisterId } + CallReturnValue => (compiler_constants::RETURN_VALUE_REGISTER) as fuel_asm::RegisterId, + Scratch => (compiler_constants::SCRATCH_REGISTER) as fuel_asm::RegisterId, + + FuncArg0 => compiler_constants::ARG_REG0 as fuel_asm::RegisterId, + FuncArg1 => compiler_constants::ARG_REG1 as fuel_asm::RegisterId, + FuncArg2 => compiler_constants::ARG_REG2 as fuel_asm::RegisterId, + FuncArg3 => compiler_constants::ARG_REG3 as fuel_asm::RegisterId, + FuncArg4 => compiler_constants::ARG_REG4 as fuel_asm::RegisterId, + FuncArg5 => compiler_constants::ARG_REG5 as fuel_asm::RegisterId, } } + + pub(crate) const ARG_REGS: [ConstantRegister; compiler_constants::NUM_ARG_REGISTERS as usize] = [ + ConstantRegister::FuncArg0, + ConstantRegister::FuncArg1, + ConstantRegister::FuncArg2, + ConstantRegister::FuncArg3, + ConstantRegister::FuncArg4, + ConstantRegister::FuncArg5, + ]; } impl fmt::Display for ConstantRegister { @@ -102,7 +135,16 @@ impl fmt::Display for ConstantRegister { // two `$` signs denotes this is a compiler-reserved register and not a // VM-reserved register DataSectionStart => "$$ds", + CallReturnAddress => "$$reta", + CallReturnValue => "$$retv", + Scratch => "$$tmp", + FuncArg0 => "$$arg0", + FuncArg1 => "$$arg1", + FuncArg2 => "$$arg2", + FuncArg3 => "$$arg3", + FuncArg4 => "$$arg4", + FuncArg5 => "$$arg5", }; - write!(f, "{}", text) + write!(f, "{text}") } } diff --git a/sway-core/src/ir_generation/const_eval.rs b/sway-core/src/ir_generation/const_eval.rs index f27e369904f..9644ea423de 100644 --- a/sway-core/src/ir_generation/const_eval.rs +++ b/sway-core/src/ir_generation/const_eval.rs @@ -206,13 +206,10 @@ fn const_eval_typed_expr( Some(cvs) => Some(cvs.clone()), None => { // 2. Check if name is a global constant. - use sway_ir::value::ValueDatum::Constant; - (lookup.lookup)(lookup, name).ok().flatten().and_then(|v| { - match &lookup.context.values[(v.0)].value { - Constant(cv) => Some(cv.clone()), - _ => None, - } - }) + (lookup.lookup)(lookup, name) + .ok() + .flatten() + .and_then(|v| v.get_constant(lookup.context).cloned()) } }, TypedExpressionVariant::StructExpression { fields, .. 
} => {
@@ -284,13 +281,14 @@ fn const_eval_typed_expr(
                 create_enum_aggregate(lookup.context, enum_decl.variants.clone()).unwrap();
             let tag_value = Constant::new_uint(64, *tag as u64);
             let mut fields: Vec<Constant> = vec![tag_value];
-            contents.iter().for_each(|subexpr| {
-                const_eval_typed_expr(lookup, known_consts, subexpr)
+            match contents {
+                None => fields.push(Constant::new_unit()),
+                Some(subexpr) => const_eval_typed_expr(lookup, known_consts, subexpr)
                     .into_iter()
                     .for_each(|enum_val| {
                         fields.push(enum_val);
-                    })
-            });
+                    }),
+            }
             Some(Constant::new_struct(&aggregate, fields))
         }
         TypedExpressionVariant::StructFieldAccess {
diff --git a/sway-core/src/ir_generation/function.rs b/sway-core/src/ir_generation/function.rs
index 8c9a4a0bd88..0be3fccdac7 100644
--- a/sway-core/src/ir_generation/function.rs
+++ b/sway-core/src/ir_generation/function.rs
@@ -840,12 +840,26 @@ impl FnCompiler {
             [Type::B256, Type::Uint(64), Type::Uint(64)].to_vec(),
         );
 
-        let addr =
-            self.compile_expression(context, md_mgr, *call_params.contract_address.clone())?;
-        let mut ra_struct_val = Constant::get_undef(context, Type::Struct(ra_struct_aggregate))
+        let ra_struct_ptr = self
+            .function
+            .new_local_ptr(
+                context,
+                self.lexical_map.insert_anon(),
+                Type::Struct(ra_struct_aggregate),
+                false,
+                None,
+            )
+            .map_err(|ir_error| CompileError::InternalOwned(ir_error.to_string(), Span::dummy()))?;
+        let ra_struct_ptr_ty = *ra_struct_ptr.get_type(context);
+        let mut ra_struct_val = self
+            .current_block
+            .ins(context)
+            .get_ptr(ra_struct_ptr, ra_struct_ptr_ty, 0)
             .add_metadatum(context, span_md_idx);
 
         // Insert the contract address
+        let addr =
+            self.compile_expression(context, md_mgr, *call_params.contract_address.clone())?;
         ra_struct_val = self
             .current_block
             .ins(context)
@@ -868,7 +882,6 @@ impl FnCompiler {
             .add_metadatum(context, span_md_idx);
 
         // Insert the user args value.
-
         ra_struct_val = self
             .current_block
             .ins(context)
@@ -1483,8 +1496,18 @@ impl FnCompiler {
         let aggregate = Aggregate::new_array(context, elem_type, contents.len() as u64);
 
         // Compile each element and insert it immediately.
-        let mut array_value = Constant::get_undef(context, Type::Array(aggregate))
+        let temp_name = self.lexical_map.insert_anon();
+        let array_ptr = self
+            .function
+            .new_local_ptr(context, temp_name, Type::Array(aggregate), false, None)
+            .map_err(|ir_error| CompileError::InternalOwned(ir_error.to_string(), Span::dummy()))?;
+        let array_ptr_ty = *array_ptr.get_type(context);
+        let mut array_value = self
+            .current_block
+            .ins(context)
+            .get_ptr(array_ptr, array_ptr_ty, 0)
             .add_metadatum(context, span_md_idx);
+
         for (idx, elem_expr) in contents.into_iter().enumerate() {
             let elem_value = self.compile_expression(context, md_mgr, elem_expr)?;
             if elem_value.is_diverging(context) {
@@ -1510,28 +1533,39 @@ impl FnCompiler {
         span_md_idx: Option<MetadataIndex>,
     ) -> Result<Value, CompileError> {
         let array_expr_span = array_expr.span.clone();
+
         let array_val = self.compile_expression(context, md_mgr, array_expr)?;
         if array_val.is_diverging(context) {
             return Ok(array_val);
         }
-        let aggregate = match &context.values[array_val.0].value {
-            ValueDatum::Instruction(instruction) => {
-                instruction.get_aggregate(context).ok_or_else(|| {
-                    CompileError::InternalOwned(format!(
-                        "Unsupported instruction as array value for index expression. 
{instruction:?}"),
-                        array_expr_span)
-                })
-            }
-            ValueDatum::Argument(Type::Array(aggregate))
-            | ValueDatum::Constant(Constant { ty : Type::Array(aggregate), ..}) => Ok (*aggregate),
-            otherwise => Err(CompileError::InternalOwned(
-                format!("Unsupported array value for index expression: {otherwise:?}"),
+
+        let aggregate = if let Some(instruction) = array_val.get_instruction(context) {
+            instruction.get_aggregate(context).ok_or_else(|| {
+                CompileError::InternalOwned(
+                    format!(
+                        "Unsupported instruction as array value for index expression. \
+                        {instruction:?}"
+                    ),
+                    array_expr_span,
+                )
+            })
+        } else if let Some(Type::Array(agg)) = array_val.get_argument_type(context) {
+            Ok(agg)
+        } else if let Some(Constant {
+            ty: Type::Array(agg),
+            ..
+        }) = array_val.get_constant(context)
+        {
+            Ok(*agg)
+        } else {
+            Err(CompileError::InternalOwned(
+                "Unsupported array value for index expression.".to_owned(),
                 array_expr_span,
-            )),
+            ))
         }?;
 
         // Check for out of bounds if we have a literal index.
-        let (_, count) = context.aggregates[aggregate.0].array_type();
+        let (_, count) = aggregate.get_content(context).array_type();
         if let TypedExpressionVariant::Literal(Literal::U64(index)) = index_expr.expression {
             if index >= *count {
                 // XXX Here is a very specific case where we want to return an Error enum
@@ -1584,10 +1618,20 @@ impl FnCompiler {
             field_types.push(field_ty);
         }
 
-        // Start with a constant empty struct and then fill in the values.
+        // Start with a temporary empty struct and then fill in the values.
         let aggregate = get_aggregate_for_types(context, &field_types)?;
-        let agg_value = Constant::get_undef(context, Type::Struct(aggregate))
+        let temp_name = self.lexical_map.insert_anon();
+        let struct_ptr = self
+            .function
+            .new_local_ptr(context, temp_name, Type::Struct(aggregate), false, None)
+            .map_err(|ir_error| CompileError::InternalOwned(ir_error.to_string(), Span::dummy()))?;
+        let struct_ptr_ty = *struct_ptr.get_type(context);
+        let agg_value = self
+            .current_block
+            .ins(context)
+            .get_ptr(struct_ptr, struct_ptr_ty, 0)
             .add_metadatum(context, span_md_idx);
+
         Ok(inserted_values_indices.into_iter().fold(
             agg_value,
             |agg_value, (insert_val, insert_idx)| {
@@ -1610,27 +1654,25 @@ impl FnCompiler {
     ) -> Result<Value, CompileError> {
         let ast_struct_expr_span = ast_struct_expr.span.clone();
         let struct_val = self.compile_expression(context, md_mgr, ast_struct_expr)?;
-        let aggregate = match &context.values[struct_val.0].value {
-            ValueDatum::Instruction(instruction) => {
-                instruction.get_aggregate(context).ok_or_else(|| {
-                    CompileError::InternalOwned(
-                        format!(
-                            "Unsupported instruction as struct value for \
-                            field expression: {instruction:?}",
-                        ),
-                        ast_struct_expr_span,
-                    )
+        let aggregate = if let Some(instruction) = struct_val.get_instruction(context) {
+            instruction.get_aggregate(context).ok_or_else(|| {
+                CompileError::InternalOwned(format!(
+                    "Unsupported instruction as struct value for field expression. {instruction:?}"),
+                    ast_struct_expr_span)
                 })
-            }
-            ValueDatum::Argument(Type::Struct(aggregate))
-            | ValueDatum::Constant(Constant {
-                ty: Type::Struct(aggregate),
-                ..
-            }) => Ok(*aggregate),
-            otherwise => Err(CompileError::InternalOwned(
-                format!("Unsupported struct value for field expression: {otherwise:?}",),
+        } else if let Some(Type::Struct(agg)) = struct_val.get_argument_type(context) {
+            Ok(agg)
+        } else if let Some(Constant {
+            ty: Type::Struct(agg),
+            ..
+        }) = struct_val.get_constant(context)
+        {
+            Ok(*agg)
+        } else {
+            Err(CompileError::InternalOwned(
+                "Unsupported struct value for field expression.".to_owned(),
                 ast_struct_expr_span,
-            )),
+            ))
         }?;
 
         let field_kind = ProjectionKind::StructField {
@@ -1679,19 +1721,28 @@ impl FnCompiler {
         let tag_value =
             Constant::get_uint(context, 64, tag as u64).add_metadatum(context, span_md_idx);
 
-        // Start with the undef and insert the tag.
-        let agg_value = Constant::get_undef(context, Type::Struct(aggregate))
+        // Start with a temporary local struct and insert the tag.
+        let temp_name = self.lexical_map.insert_anon();
+        let enum_ptr = self
+            .function
+            .new_local_ptr(context, temp_name, Type::Struct(aggregate), false, None)
+            .map_err(|ir_error| CompileError::InternalOwned(ir_error.to_string(), Span::dummy()))?;
+        let enum_ptr_ty = *enum_ptr.get_type(context);
+        let enum_ptr_value = self
+            .current_block
+            .ins(context)
+            .get_ptr(enum_ptr, enum_ptr_ty, 0)
             .add_metadatum(context, span_md_idx);
         let agg_value = self
             .current_block
             .ins(context)
-            .insert_value(agg_value, aggregate, tag_value, vec![0])
+            .insert_value(enum_ptr_value, aggregate, tag_value, vec![0])
             .add_metadatum(context, span_md_idx);
 
         // If the struct representing the enum has only one field, then that field is basically the
         // tag and all the variants must have unit types, hence the absence of the union.
         // Therefore, there is no need for another `insert_value` instruction here.
-        match &context.aggregates[aggregate.0] {
+        match aggregate.get_content(context) {
             AggregateContent::FieldTypes(field_tys) => {
                 Ok(if field_tys.len() == 1 {
                     agg_value
@@ -1738,7 +1789,18 @@ impl FnCompiler {
         }
 
         let aggregate = Aggregate::new_struct(context, init_types);
-        let agg_value = Constant::get_undef(context, Type::Struct(aggregate))
+        let temp_name = self.lexical_map.insert_anon();
+        let tuple_ptr = self
+            .function
+            .new_local_ptr(context, temp_name, Type::Struct(aggregate), false, None)
+            .map_err(|ir_error| {
+                CompileError::InternalOwned(ir_error.to_string(), Span::dummy())
+            })?;
+        let tuple_ptr_ty = *tuple_ptr.get_type(context);
+        let agg_value = self
+            .current_block
+            .ins(context)
+            .get_ptr(tuple_ptr, tuple_ptr_ty, 0)
             .add_metadatum(context, span_md_idx);
 
         Ok(init_values.into_iter().enumerate().fold(
             agg_value,
             |agg_value, (insert_val, insert_idx)| {
@@ -1870,10 +1932,21 @@ impl FnCompiler {
     ) -> Result<Value, CompileError> {
         match ty {
             Type::Struct(aggregate) => {
-                let mut struct_val = Constant::get_undef(context, Type::Struct(*aggregate))
+                let temp_name = self.lexical_map.insert_anon();
+                let struct_ptr = self
+                    .function
+                    .new_local_ptr(context, temp_name, Type::Struct(*aggregate), false, None)
+                    .map_err(|ir_error| {
+                        CompileError::InternalOwned(ir_error.to_string(), Span::dummy())
+                    })?;
+                let struct_ptr_ty = *struct_ptr.get_type(context);
+                let mut struct_val = self
+                    .current_block
+                    .ins(context)
+                    .get_ptr(struct_ptr, struct_ptr_ty, 0)
                     .add_metadatum(context, span_md_idx);
 
-                let fields = context.aggregates[aggregate.0].field_types().clone();
+                let fields = aggregate.get_content(context).field_types().clone();
                 for (field_idx, field_type) in fields.into_iter().enumerate() {
                     let field_idx = field_idx as u64;
@@ -1990,7 +2063,7 @@ impl FnCompiler {
     ) -> Result<(), CompileError> {
         match ty {
             Type::Struct(aggregate) => {
-                let fields = context.aggregates[aggregate.0].field_types().clone();
+                let fields = aggregate.get_content(context).field_types().clone();
                 for (field_idx, field_type) in fields.into_iter().enumerate() {
                     let field_idx = field_idx as u64;
diff --git 
a/sway-core/src/ir_generation/lexical_map.rs b/sway-core/src/ir_generation/lexical_map.rs
index 37f47763ed8..b4a2ceca534 100644
--- a/sway-core/src/ir_generation/lexical_map.rs
+++ b/sway-core/src/ir_generation/lexical_map.rs
@@ -8,20 +8,20 @@
 // and remove shadowing symbols, the re-use of symbol names can't be allowed, so all names are
 // reserved even when they're not 'currently' valid.
 
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 
 pub(super) struct LexicalMap {
     symbol_map: Vec<HashMap<String, String>>,
-    reserved_sybols: Vec<String>,
+    reserved_sybols: HashSet<String>,
 }
 
 impl LexicalMap {
     pub(super) fn from_iter<I: IntoIterator<Item = String>>(names: I) -> Self {
-        let (root_symbol_map, reserved_sybols): (HashMap<String, String>, Vec<String>) = names
+        let (root_symbol_map, reserved_sybols): (HashMap<String, String>, HashSet<String>) = names
             .into_iter()
-            .fold((HashMap::new(), Vec::new()), |(mut m, mut r), name| {
+            .fold((HashMap::new(), HashSet::new()), |(mut m, mut r), name| {
                 m.insert(name.clone(), name.clone());
-                r.push(name);
+                r.insert(name);
                 (m, r)
             });
 
@@ -53,13 +53,12 @@ impl LexicalMap {
     pub(super) fn insert(&mut self, new_symbol: String) -> String {
         // Insert this new symbol into this lexical scope.  If it has ever existed then the
         // original will be shadowed and the shadower is returned.
-        fn get_new_local_symbol(reserved: &[String], candidate: String) -> String {
-            match reserved.iter().find(|&reserved| reserved == &candidate) {
-                None => candidate,
-                Some(_) => {
-                    // Try again with adjusted candidate.
-                    get_new_local_symbol(reserved, format!("{candidate}_"))
-                }
+        fn get_new_local_symbol(reserved: &HashSet<String>, candidate: String) -> String {
+            if reserved.contains(&candidate) {
+                // Try again with adjusted candidate.
+                get_new_local_symbol(reserved, format!("{candidate}_"))
+            } else {
+                candidate
             }
         }
         let local_symbol = get_new_local_symbol(&self.reserved_sybols, new_symbol.clone());
@@ -67,7 +66,22 @@ impl LexicalMap {
             .last_mut()
             .expect("LexicalMap should always have at least the root scope.")
             .insert(new_symbol, local_symbol.clone());
-        self.reserved_sybols.push(local_symbol.clone());
+        self.reserved_sybols.insert(local_symbol.clone());
 
         local_symbol
     }
+
+    // Generate and reserve a unique 'anonymous' symbol.  It is of the form `__anon_X` where `X`
+    // is a unique number.
+    pub(super) fn insert_anon(&mut self) -> String {
+        let anon_symbol = (0..)
+            .map(|n| format!("__anon_{n}"))
+            .find(|candidate| !self.reserved_sybols.contains(candidate))
+            .unwrap();
+        self.symbol_map
+            .last_mut()
+            .expect("LexicalMap should always have at least the root scope.")
+            .insert(anon_symbol.clone(), anon_symbol.clone());
+        self.reserved_sybols.insert(anon_symbol.clone());
+        anon_symbol
+    }
 }
diff --git a/sway-core/src/ir_generation/purity.rs b/sway-core/src/ir_generation/purity.rs
index 53564d7c1b5..897c260fbc5 100644
--- a/sway-core/src/ir_generation/purity.rs
+++ b/sway-core/src/ir_generation/purity.rs
@@ -4,7 +4,7 @@ use crate::{
     parse_tree::{promote_purity, Purity},
 };
 
-use sway_ir::{Context, Function, Instruction, ValueDatum};
+use sway_ir::{Context, Function, Instruction};
 use sway_types::span::Span;
 
 use std::collections::HashMap;
@@ -36,45 +36,51 @@ impl PurityChecker {
         //  - via calls into functions with the above.
         let (reads, writes) = function.instruction_iter(context).fold(
             (false, false),
-            |(reads, writes), (_block, ins_value)| match &context.values[ins_value.0].value {
-                ValueDatum::Instruction(Instruction::StateLoadQuadWord { .. 
}) - | ValueDatum::Instruction(Instruction::StateLoadWord(_)) => (true, writes), - - ValueDatum::Instruction(Instruction::StateStoreQuadWord { .. }) - | ValueDatum::Instruction(Instruction::StateStoreWord { .. }) => (reads, true), - - // Iterate for and check each instruction in the ASM block. - ValueDatum::Instruction(Instruction::AsmBlock(asm_block, _args)) => { - context.asm_blocks[asm_block.0].body.iter().fold( - (reads, writes), - |(reads, writes), asm_op| match asm_op.name.as_str() { - "srw" | "srwq" => (true, writes), - "sww" | "swwq" => (reads, true), - _ => (reads, writes), - }, - ) - } - - // Recurse to find the called function purity. Use memoisation to avoid redoing - // work. - ValueDatum::Instruction(Instruction::Call(callee, _args)) => { - let (called_fn_reads, called_fn_writes) = - self.memos.get(callee).copied().unwrap_or_else(|| { - let r_w = self.check_function(context, md_mgr, callee); - self.memos.insert(*callee, r_w); - r_w - }); - (reads || called_fn_reads, writes || called_fn_writes) - } - - _otherwise => (reads, writes), + |(reads, writes), (_block, ins_value)| { + ins_value + .get_instruction(context) + .map(|instruction| { + match instruction { + Instruction::StateLoadQuadWord { .. } + | Instruction::StateLoadWord(_) => (true, writes), + + Instruction::StateStoreQuadWord { .. } + | Instruction::StateStoreWord { .. } => (reads, true), + + // Iterate for and check each instruction in the ASM block. + Instruction::AsmBlock(asm_block, _args) => { + asm_block.get_content(context).body.iter().fold( + (reads, writes), + |(reads, writes), asm_op| match asm_op.name.as_str() { + "srw" | "srwq" => (true, writes), + "sww" | "swwq" => (reads, true), + _ => (reads, writes), + }, + ) + } + + // Recurse to find the called function purity. Use memoisation to + // avoid redoing work. + Instruction::Call(callee, _args) => { + let (called_fn_reads, called_fn_writes) = + self.memos.get(callee).copied().unwrap_or_else(|| { + let r_w = self.check_function(context, md_mgr, callee); + self.memos.insert(*callee, r_w); + r_w + }); + (reads || called_fn_reads, writes || called_fn_writes) + } + + _otherwise => (reads, writes), + } + }) + .unwrap_or_else(|| (reads, writes)) }, ); - let function = &context.functions[function.0]; - let attributed_purity = md_mgr.md_to_storage_op(context, function.metadata); + let attributed_purity = md_mgr.md_to_storage_op(context, function.get_metadata(context)); let span = md_mgr - .md_to_span(context, function.metadata) + .md_to_span(context, function.get_metadata(context)) .unwrap_or_else(Span::dummy); // Simple macros for each of the error types, which also grab `span`. 
diff --git a/sway-core/src/ir_generation/storage.rs b/sway-core/src/ir_generation/storage.rs
index b981b1ce890..821f75aa27a 100644
--- a/sway-core/src/ir_generation/storage.rs
+++ b/sway-core/src/ir_generation/storage.rs
@@ -104,7 +104,7 @@ pub fn serialize_to_storage_slots(
             unimplemented!("Arrays in storage have not been implemented yet.")
         }
         (Type::Struct(aggregate), ConstantValue::Struct(vec)) => {
-            match &context.aggregates[aggregate.0] {
+            match aggregate.get_content(context) {
                 AggregateContent::FieldTypes(field_tys) => vec
                     .iter()
                     .zip(field_tys.iter())
@@ -202,7 +202,7 @@ pub fn serialize_to_words(constant: &Constant, context: &Context, ty: &Type) ->
             unimplemented!("Arrays in storage have not been implemented yet.")
         }
         (Type::Struct(aggregate), ConstantValue::Struct(vec)) => {
-            match &context.aggregates[aggregate.0] {
+            match aggregate.get_content(context) {
                 AggregateContent::FieldTypes(field_tys) => vec
                     .iter()
                     .zip(field_tys.iter())
diff --git a/sway-core/src/lib.rs b/sway-core/src/lib.rs
index 2bfe87baf79..01149934a78 100644
--- a/sway-core/src/lib.rs
+++ b/sway-core/src/lib.rs
@@ -28,7 +28,7 @@ use std::collections::HashMap;
 use std::path::{Path, PathBuf};
 use std::sync::Arc;
 use sway_ast::Dependency;
-use sway_ir::{Kind, Module};
+use sway_ir::{Context, Function, Instruction, Kind, Module, Value};
 
 pub use semantic_analysis::{
     namespace::{self, Namespace},
@@ -379,8 +379,6 @@ pub fn ast_to_asm(
         }
     }
 }
 
-use sway_ir::{context::Context, function::Function};
-
 pub(crate) fn compile_ast_to_ir_to_asm(
     program: TypedProgram,
     build_config: &BuildConfig,
@@ -408,17 +406,16 @@ pub(crate) fn compile_ast_to_ir_to_asm(
     // Find all the entry points.  This is main for scripts and predicates, or ABI methods for
     // contracts, identified by them having a selector.
     let entry_point_functions: Vec<::sway_ir::Function> = ir
-        .functions
-        .iter()
-        .filter_map(|(idx, fc)| {
-            if (matches!(tree_type, TreeType::Script | TreeType::Predicate)
-                && fc.name == crate::constants::DEFAULT_ENTRY_POINT_FN_NAME)
-                || (tree_type == TreeType::Contract && fc.selector.is_some())
-            {
-                Some(::sway_ir::function::Function(idx))
-            } else {
-                None
-            }
+        .module_iter()
+        .flat_map(|module| module.function_iter(&ir))
+        .filter(|func| {
+            let is_script_or_predicate =
+                matches!(tree_type, TreeType::Script | TreeType::Predicate);
+            let is_contract = tree_type == TreeType::Contract;
+            let has_entry_name =
+                func.get_name(&ir) == crate::constants::DEFAULT_ENTRY_POINT_FN_NAME;
+
+            (is_script_or_predicate && has_entry_name) || (is_contract && func.has_selector(&ir))
         })
         .collect();
 
@@ -435,9 +432,15 @@ pub(crate) fn compile_ast_to_ir_to_asm(
         errors
     );
 
-    // Inline function calls from the entry points.
+    // Now we're working with all functions in the module.
+    let all_functions = ir
+        .module_iter()
+        .flat_map(|module| module.function_iter(&ir))
+        .collect::<Vec<_>>();
+
+    // Inline function calls.
     check!(
-        inline_function_calls(&mut ir, &entry_point_functions),
+        inline_function_calls(&mut ir, &all_functions),
        return err(warnings, errors),
        warnings,
        errors
    );

    // TODO: Experiment with putting combine-constants and simplify-cfg
    // in a loop, but per function.
    check!(
-        combine_constants(&mut ir, &entry_point_functions),
+        combine_constants(&mut ir, &all_functions),
         return err(warnings, errors),
         warnings,
         errors
     );
     check!(
-        simplify_cfg(&mut ir, &entry_point_functions),
+        simplify_cfg(&mut ir, &all_functions),
         return err(warnings, errors),
         warnings,
         errors
     );
     // Simplify-CFG helps combine constants.
     check!(
-        combine_constants(&mut ir, &entry_point_functions),
+        combine_constants(&mut ir, &all_functions),
         return err(warnings, errors),
         warnings,
         errors
     );
     // And that in-turn enables more simplify-cfg.
     check!(
-        simplify_cfg(&mut ir, &entry_point_functions),
+        simplify_cfg(&mut ir, &all_functions),
         return err(warnings, errors),
         warnings,
         errors
     );
 
-    // Remove dead definitions.
+    // Remove dead definitions based on the entry points root set.
     check!(
         dce(&mut ir, &entry_point_functions),
         return err(warnings, errors),
@@ -488,8 +491,49 @@ pub(crate) fn compile_ast_to_ir_to_asm(
 }
 
 fn inline_function_calls(ir: &mut Context, functions: &[Function]) -> CompileResult<()> {
+    // Inspect ALL calls and count how often each function is called.
+    let call_counts: HashMap<Function, u64> =
+        functions.iter().fold(HashMap::new(), |mut counts, func| {
+            for (_block, ins) in func.instruction_iter(ir) {
+                if let Some(Instruction::Call(callee, _args)) = ins.get_instruction(ir) {
+                    counts
+                        .entry(*callee)
+                        .and_modify(|count| *count += 1)
+                        .or_insert(1);
+                }
+            }
+            counts
+        });
+
+    let inline_heuristic = |ctx: &Context, func: &Function, _call_site: &Value| {
+        // For now, pending improvements to ASMgen for calls, we must inline any function which has
+        // a non-copy return type or has too many args.
+        if !func.get_return_type(ctx).is_copy_type()
+            || func.args_iter(ctx).count() as u8
+                > crate::asm_generation::compiler_constants::NUM_ARG_REGISTERS
+        {
+            return true;
+        }
+
+        // If the function is called only once then definitely inline it.
+        let call_count = call_counts.get(func).copied().unwrap_or(0);
+        if call_count == 1 {
+            return true;
+        }
+
+        // If the function is (still) small then also inline it.
+        const MAX_INLINE_INSTRS_COUNT: usize = 4;
+        if func.num_instructions(ctx) <= MAX_INLINE_INSTRS_COUNT {
+            return true;
+        }
+
+        false
+    };
+
     for function in functions {
-        if let Err(ir_error) = sway_ir::optimize::inline_all_function_calls(ir, function) {
+        if let Err(ir_error) =
+            sway_ir::optimize::inline_some_function_calls(ir, function, inline_heuristic)
+        {
             return err(
                 Vec::new(),
                 vec![CompileError::InternalOwned(
@@ -517,16 +561,24 @@ fn combine_constants(ir: &mut Context, functions: &[Function]) -> CompileResult<()> {
     ok((), Vec::new(), Vec::new())
 }
 
-fn dce(ir: &mut Context, functions: &[Function]) -> CompileResult<()> {
-    for function in functions {
-        if let Err(ir_error) = sway_ir::optimize::dce(ir, function) {
-            return err(
-                Vec::new(),
-                vec![CompileError::InternalOwned(
-                    ir_error.to_string(),
-                    span::Span::dummy(),
-                )],
-            );
+fn dce(ir: &mut Context, entry_functions: &[Function]) -> CompileResult<()> {
+    // Remove entire dead functions first.
+    for module in ir.module_iter() {
+        sway_ir::optimize::func_dce(ir, &module, entry_functions);
+    }
+
+    // Then DCE all the remaining functions.
+    for module in ir.module_iter() {
+        for function in module.function_iter(ir) {
+            if let Err(ir_error) = sway_ir::optimize::dce(ir, &function) {
+                return err(
+                    Vec::new(),
+                    vec![CompileError::InternalOwned(
+                        ir_error.to_string(),
+                        span::Span::dummy(),
+                    )],
+                );
+            }
         }
     }
     ok((), Vec::new(), Vec::new())
 }
diff --git a/sway-core/src/metadata.rs b/sway-core/src/metadata.rs
index 2555b832237..4149f452f7d 100644
--- a/sway-core/src/metadata.rs
+++ b/sway-core/src/metadata.rs
@@ -42,7 +42,8 @@ impl MetadataManager {
         Self::for_each_md_idx(context, md_idx, |md_idx| {
             self.md_span_cache.get(&md_idx).cloned().or_else(|| {
                 // Create a new span and save it in the cache.
-                context.metadata[md_idx.0]
+                md_idx
+                    .get_content(context)
                     .unwrap_struct("span", 3)
                     .and_then(|fields| {
                         let (path, src) = self.md_to_file_location(context, &fields[0])?;
@@ -66,7 +67,8 @@ impl MetadataManager {
         Self::for_each_md_idx(context, md_idx, |md_idx| {
             self.md_storage_op_cache.get(&md_idx).copied().or_else(|| {
                 // Create a new storage op and save it in the cache.
-                context.metadata[md_idx.0]
+                md_idx
+                    .get_content(context)
                     .unwrap_struct("storage", 1)
                     .and_then(|fields| {
                         fields[0].unwrap_string().and_then(|stor_str| {
@@ -94,7 +96,8 @@ impl MetadataManager {
         Self::for_each_md_idx(context, md_idx, |md_idx| {
             self.md_storage_key_cache.get(&md_idx).copied().or_else(|| {
                 // Create a new storage key and save it in the cache.
-                context.metadata[md_idx.0]
+                md_idx
+                    .get_content(context)
                     .unwrap_struct("state_index", 1)
                     .and_then(|fields| {
                         let key = fields[0].unwrap_integer()?;
@@ -115,7 +118,8 @@ impl MetadataManager {
         md.unwrap_index().and_then(|md_idx| {
             self.md_file_loc_cache.get(&md_idx).cloned().or_else(|| {
                 // Create a new file location (path and src) and save it in the cache.
-                context.metadata[md_idx.0]
+                md_idx
+                    .get_content(context)
                     .unwrap_string()
                     .and_then(|path_buf_str| {
                         let path_buf = PathBuf::from(path_buf_str);
@@ -131,11 +135,11 @@ impl MetadataManager {
     }
 
     pub(crate) fn val_to_span(&mut self, context: &Context, value: Value) -> Option<Span> {
-        self.md_to_span(context, context.values[value.0].metadata)
+        self.md_to_span(context, value.get_metadata(context))
     }
 
     pub(crate) fn val_to_storage_key(&mut self, context: &Context, value: Value) -> Option<u64> {
-        self.md_to_storage_key(context, context.values[value.0].metadata)
+        self.md_to_storage_key(context, value.get_metadata(context))
     }
 
     pub(crate) fn span_to_md(
@@ -147,14 +151,15 @@ impl MetadataManager {
         span.path().and_then(|path_buf| {
             // Create new metadata.
             let file_location_md_idx = self.file_location_to_md(context, path_buf)?;
-            let md_idx = MetadataIndex(context.metadata.insert(Metadatum::Struct(
-                "span".to_owned(),
+            let md_idx = MetadataIndex::new_struct(
+                context,
+                "span",
                 vec![
                     Metadatum::Index(file_location_md_idx),
                     Metadatum::Integer(span.start() as u64),
                     Metadatum::Integer(span.end() as u64),
                 ],
-            )));
+            );
 
             self.span_md_cache.insert(span.clone(), md_idx);
 
@@ -173,10 +178,11 @@ impl MetadataManager {
             .copied()
             .or_else(|| {
                 // Create new metadatum.
-                let md_idx = MetadataIndex(context.metadata.insert(Metadatum::Struct(
-                    "state_index".to_owned(),
+                let md_idx = MetadataIndex::new_struct(
+                    context,
+                    "state_index",
                     vec![Metadatum::Integer(storage_key)],
-                )));
+                );
 
                 self.storage_key_md_cache.insert(storage_key, md_idx);
 
@@ -201,10 +207,11 @@ impl MetadataManager {
                     Purity::Writes => "writes",
                     Purity::ReadsWrites => "readswrites",
                 };
-                let md_idx = MetadataIndex(context.metadata.insert(Metadatum::Struct(
-                    "storage".to_owned(),
+                let md_idx = MetadataIndex::new_struct(
+                    context,
+                    "storage",
                     vec![Metadatum::String(field.to_owned())],
-                )));
+                );
 
                 self.storage_op_md_cache.insert(purity, md_idx);
 
@@ -222,11 +229,7 @@ impl MetadataManager {
             .get(&Arc::as_ptr(path))
             .copied()
             .or_else(|| {
-                let md_idx = MetadataIndex(
-                    context
-                        .metadata
-                        .insert(Metadatum::String(path.to_string_lossy().into())),
-                );
+                let md_idx = MetadataIndex::new_string(context, path.to_string_lossy());
 
                 self.file_loc_md_cache.insert(Arc::as_ptr(path), md_idx);
 
@@ -241,7 +244,7 @@ impl MetadataManager {
     ) -> Option<T> {
         // If md_idx is not None and is a list then try them all.
         md_idx.and_then(|md_idx| {
-            if let Metadatum::List(md_idcs) = &context.metadata[md_idx.0] {
+            if let Some(md_idcs) = md_idx.get_content(context).unwrap_list() {
                 md_idcs.iter().find_map(|md_idx| f(*md_idx))
             } else {
                 f(md_idx)
diff --git a/sway-core/src/parse_tree/literal.rs b/sway-core/src/parse_tree/literal.rs
index f5b2b659833..a3c0f92d81a 100644
--- a/sway-core/src/parse_tree/literal.rs
+++ b/sway-core/src/parse_tree/literal.rs
@@ -126,53 +126,6 @@ impl Literal {
         }
     }
 
-    /// Converts a literal to a big-endian representation. This is padded to words.
-    pub(crate) fn to_bytes(&self) -> Vec<u8> {
-        use Literal::*;
-        match self {
-            U8(val) => vec![0, 0, 0, 0, 0, 0, 0, val.to_be_bytes()[0]],
-            U16(val) => {
-                let bytes = val.to_be_bytes();
-                vec![0, 0, 0, 0, 0, 0, bytes[0], bytes[1]]
-            }
-            U32(val) => {
-                let bytes = val.to_be_bytes();
-                vec![0, 0, 0, 0, bytes[0], bytes[1], bytes[2], bytes[3]]
-            }
-            U64(val) => val.to_be_bytes().to_vec(),
-            Numeric(val) => val.to_be_bytes().to_vec(),
-            Boolean(b) => {
-                vec![
-                    0,
-                    0,
-                    0,
-                    0,
-                    0,
-                    0,
-                    0,
-                    if *b { 0b00000001 } else { 0b00000000 },
-                ]
-            }
-            // assume utf8 for now
-            String(st) => {
-                let mut buf = st.as_str().to_string().into_bytes();
-                // pad to word alignment
-                while buf.len() % 8 != 0 {
-                    buf.push(0);
-                }
-                buf
-            }
-            Byte(b) => vec![0, 0, 0, 0, 0, 0, 0, b.to_be_bytes()[0]],
-            B256(b) => b.to_vec(),
-        }
-    }
-
-    /// Used when creating a pointer literal value, typically during code generation for
-    /// values that wouldn't fit in a register. 
- pub(crate) fn new_pointer_literal(offset_bytes: u64) -> Literal { - Literal::U64(offset_bytes) - } - #[allow(clippy::wildcard_in_or_patterns)] pub(crate) fn handle_parse_int_error( e: ParseIntError, diff --git a/sway-core/src/type_system/resolved_type.rs b/sway-core/src/type_system/resolved_type.rs index 3d788cfe388..4b66e2df09f 100644 --- a/sway-core/src/type_system/resolved_type.rs +++ b/sway-core/src/type_system/resolved_type.rs @@ -50,20 +50,3 @@ impl Default for ResolvedType { ResolvedType::Unit } } - -impl ResolvedType { - pub(crate) fn is_copy_type(&self) -> bool { - matches!( - self, - ResolvedType::Boolean - | ResolvedType::Byte - | ResolvedType::Unit - | ResolvedType::UnsignedInteger(_) - ) - } - - #[allow(dead_code)] - pub fn is_numeric(&self) -> bool { - matches!(self, ResolvedType::UnsignedInteger(_)) - } -} diff --git a/sway-core/tests/ir_to_asm/bigger_asm_block.asm b/sway-core/tests/ir_to_asm/bigger_asm_block.asm index d843843b3e3..a8cfb96a2a7 100644 --- a/sway-core/tests/ir_to_asm/bigger_asm_block.asm +++ b/sway-core/tests/ir_to_asm/bigger_asm_block.asm @@ -6,7 +6,7 @@ DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is move $r2 $sp ; save locals base register -cfei i32 ; allocate 32 bytes for all locals +cfei i32 ; allocate 32 bytes for locals addi $r0 $r2 i0 ; get offset reg for get_ptr lw $r1 data_0 ; literal instantiation addi $r0 $r2 i0 ; get store offset @@ -19,5 +19,5 @@ meq $r0 $r2 $r1 $r0 ; asm block ret $r0 noop ; word-alignment of data section .data: -data_0 .b256 0x0202020202020202020202020202020202020202020202020202020202020202 -data_1 .b256 0x0303030303030303030303030303030303030303030303030303030303030303 +data_0 .bytes[32] 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 02 ................................ +data_1 .bytes[32] 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 03 ................................ 
diff --git a/sway-core/tests/ir_to_asm/binops.asm b/sway-core/tests/ir_to_asm/binops.asm index bfd20744987..83e4cd0fd81 100644 --- a/sway-core/tests/ir_to_asm/binops.asm +++ b/sway-core/tests/ir_to_asm/binops.asm @@ -6,7 +6,7 @@ DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is move $r2 $sp ; save locals base register -cfei i48 ; allocate 48 bytes for all locals +cfei i48 ; allocate 48 bytes for locals addi $r0 $r2 i0 ; get offset reg for get_ptr lw $r0 data_0 ; literal instantiation sw $r2 $r0 i0 ; store value @@ -17,9 +17,9 @@ addi $r0 $r2 i0 ; get offset reg for get_ptr lw $r1 $r2 i0 ; load value addi $r0 $r2 i8 ; get offset reg for get_ptr lw $r0 $r2 i1 ; load value -add $r0 $r1 $r0 -addi $r1 $r2 i16 ; get offset reg for get_ptr -sw $r2 $r0 i2 ; store value +add $r1 $r1 $r0 +addi $r0 $r2 i16 ; get offset reg for get_ptr +sw $r2 $r1 i2 ; store value addi $r0 $r2 i16 ; get offset reg for get_ptr lw $r1 $r2 i2 ; load value lw $r0 data_2 ; literal instantiation @@ -41,6 +41,6 @@ addi $r0 $r2 i40 ; get offset reg for get_ptr lw $r0 $r2 i5 ; load value ret $r0 .data: -data_0 .u64 0x16 -data_1 .u64 0x2c -data_2 .u64 0x02 +data_0 .word 22 +data_1 .word 44 +data_2 .word 2 diff --git a/sway-core/tests/ir_to_asm/enum_in_storage_read.asm b/sway-core/tests/ir_to_asm/enum_in_storage_read.asm index 4805f2a24a7..1e73bb541ad 100644 --- a/sway-core/tests/ir_to_asm/enum_in_storage_read.asm +++ b/sway-core/tests/ir_to_asm/enum_in_storage_read.asm @@ -5,83 +5,83 @@ DATA_SECTION_OFFSET[0..32] DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is -lw $r1 $fp i73 ; load input function selector -lw $r0 data_7 ; load fn selector for comparison -eq $r0 $r1 $r0 ; function selector comparison -jnzi $r0 i11 ; jump to selected function -rvrt $zero ; revert if no selectors matched +lw $r0 $fp i73 ; load input function selector +lw $r1 data_9 ; load fn selector for comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i12 ; jump to selected function +movi $$tmp i123 ; special code for mismatched selector +rvrt $$tmp ; revert if no selectors matched move $r5 $sp ; save locals base register -cfei i256 ; allocate 256 bytes for all locals +cfei i256 ; allocate 256 bytes for locals addi $r0 $r5 i0 ; get offset reg for get_ptr lw $r1 data_0 ; literal instantiation addi $r0 $r5 i0 ; get store offset mcpi $r0 $r1 i32 ; store value addi $r0 $r5 i0 ; get offset srw $r0 $r0 ; single word state access -move $r2 $sp ; save register for temporary stack value -cfei i48 ; allocate 48 bytes for temporary struct -sw $r2 $r0 i0 ; insert_value @ 0 -addi $r0 $r5 i32 ; get offset reg for get_ptr lw $r1 data_1 ; literal instantiation -addi $r0 $r5 i32 ; get store offset -mcpi $r0 $r1 i32 ; store value +sw $r1 $r0 i0 ; insert_value @ 0 +addi $r0 $r5 i32 ; get offset reg for get_ptr +lw $r0 data_2 ; literal instantiation +addi $r2 $r5 i32 ; get store offset +mcpi $r2 $r0 i32 ; store value addi $r3 $r5 i128 ; get offset reg for get_ptr addi $r0 $r5 i128 ; get offset reg for get_ptr -addi $r1 $r5 i128 ; get offset +addi $r2 $r5 i128 ; get offset addi $r0 $r5 i32 ; get offset -srwq $r1 $r0 ; quad word state access +srwq $r2 $r0 ; quad word state access addi $r0 $r5 i32 ; get offset reg for get_ptr -lw $r1 data_2 ; literal instantiation +lw $r2 data_3 ; literal instantiation addi $r0 $r5 i32 ; get store offset -mcpi $r0 $r1 i32 ; store value +mcpi $r0 $r2 i32 ; store value addi $r0 $r5 i160 ; get offset reg for get_ptr -addi $r1 $r5 i160 ; get offset +addi $r2 $r5 i160 ; get offset addi $r0 $r5 i32 ; get offset -srwq 
$r1 $r0 ; quad word state access -addi $r0 $r2 i8 ; get struct field(s) 1 offset +srwq $r2 $r0 ; quad word state access +addi $r0 $r1 i8 ; get struct field(s) 1 offset mcpi $r0 $r3 i40 ; store struct field value addi $r0 $r5 i64 ; get offset reg for get_ptr -lw $r1 data_3 ; literal instantiation +lw $r2 data_4 ; literal instantiation addi $r0 $r5 i64 ; get store offset -mcpi $r0 $r1 i32 ; store value +mcpi $r0 $r2 i32 ; store value addi $r0 $r5 i64 ; get offset srw $r0 $r0 ; single word state access -move $r4 $sp ; save register for temporary stack value -cfei i48 ; allocate 48 bytes for temporary struct +lw $r4 data_1 ; literal instantiation sw $r4 $r0 i0 ; insert_value @ 0 addi $r0 $r5 i96 ; get offset reg for get_ptr -lw $r1 data_4 ; literal instantiation +lw $r2 data_5 ; literal instantiation addi $r0 $r5 i96 ; get store offset -mcpi $r0 $r1 i32 ; store value +mcpi $r0 $r2 i32 ; store value addi $r3 $r5 i192 ; get offset reg for get_ptr addi $r0 $r5 i192 ; get offset reg for get_ptr -addi $r1 $r5 i192 ; get offset +addi $r2 $r5 i192 ; get offset addi $r0 $r5 i96 ; get offset -srwq $r1 $r0 ; quad word state access +srwq $r2 $r0 ; quad word state access addi $r0 $r5 i96 ; get offset reg for get_ptr -lw $r1 data_5 ; literal instantiation +lw $r2 data_6 ; literal instantiation addi $r0 $r5 i96 ; get store offset -mcpi $r0 $r1 i32 ; store value +mcpi $r0 $r2 i32 ; store value addi $r0 $r5 i224 ; get offset reg for get_ptr -addi $r1 $r5 i224 ; get offset +addi $r2 $r5 i224 ; get offset addi $r0 $r5 i96 ; get offset -srwq $r1 $r0 ; quad word state access +srwq $r2 $r0 ; quad word state access addi $r0 $r4 i8 ; get struct field(s) 1 offset mcpi $r0 $r3 i40 ; store struct field value -move $r1 $sp ; save register for temporary stack value -cfei i96 ; allocate 96 bytes for temporary struct -addi $r0 $r1 i0 ; get struct field(s) 0 offset -mcpi $r0 $r2 i48 ; store struct field value -addi $r0 $r1 i48 ; get struct field(s) 1 offset +lw $r2 data_7 ; literal instantiation +addi $r0 $r2 i0 ; get struct field(s) 0 offset +mcpi $r0 $r1 i48 ; store struct field value +addi $r0 $r2 i48 ; get struct field(s) 1 offset mcpi $r0 $r4 i48 ; store struct field value -lw $r0 data_6 ; loading size for RETD -retd $r1 $r0 +lw $r0 data_8 ; loading size for RETD +retd $r2 $r0 .data: -data_0 .b256 0xd625ff6d8e88efd7bb3476e748e5d5935618d78bfc7eedf584fe909ce0809fc3 -data_1 .b256 0xc4f29cca5a7266ecbc35c82c55dd2b0059a3db4c83a3410653ec33aded8e9840 -data_2 .b256 0xc4f29cca5a7266ecbc35c82c55dd2b0059a3db4c83a3410653ec33aded8e9841 -data_3 .b256 0x2817e0819d6fcad797114fbcf350fa281aca33a39b0abf977797bddd69b8e7af -data_4 .b256 0x12ea9b9b05214a0d64996d259c59202b80a21415bb68b83121353e2a5925ec47 -data_5 .b256 0x12ea9b9b05214a0d64996d259c59202b80a21415bb68b83121353e2a5925ec48 -data_6 .u64 0x60 -data_7 .u32 0x1665bf4 +data_0 .bytes[32] d6 25 ff 6d 8e 88 ef d7 bb 34 76 e7 48 e5 d5 93 56 18 d7 8b fc 7e ed f5 84 fe 90 9c e0 80 9f c3 .%.m.....4v.H...V....~.......... +data_1 .collection { .word 0, .word 0 } +data_2 .bytes[32] c4 f2 9c ca 5a 72 66 ec bc 35 c8 2c 55 dd 2b 00 59 a3 db 4c 83 a3 41 06 53 ec 33 ad ed 8e 98 40 ....Zrf..5.,U.+.Y..L..A.S.3....@ +data_3 .bytes[32] c4 f2 9c ca 5a 72 66 ec bc 35 c8 2c 55 dd 2b 00 59 a3 db 4c 83 a3 41 06 53 ec 33 ad ed 8e 98 41 ....Zrf..5.,U.+.Y..L..A.S.3....A +data_4 .bytes[32] 28 17 e0 81 9d 6f ca d7 97 11 4f bc f3 50 fa 28 1a ca 33 a3 9b 0a bf 97 77 97 bd dd 69 b8 e7 af (....o....O..P.(..3.....w...i... 
+data_5 .bytes[32] 12 ea 9b 9b 05 21 4a 0d 64 99 6d 25 9c 59 20 2b 80 a2 14 15 bb 68 b8 31 21 35 3e 2a 59 25 ec 47 .....!J.d.m%.Y +.....h.1!5>*Y%.G +data_6 .bytes[32] 12 ea 9b 9b 05 21 4a 0d 64 99 6d 25 9c 59 20 2b 80 a2 14 15 bb 68 b8 31 21 35 3e 2a 59 25 ec 48 .....!J.d.m%.Y +.....h.1!5>*Y%.H +data_7 .collection { .collection { .word 0, .word 0 }, .collection { .word 0, .word 0 } } +data_8 .word 96 +data_9 .word 23485428 diff --git a/sway-core/tests/ir_to_asm/enum_in_storage_write.asm b/sway-core/tests/ir_to_asm/enum_in_storage_write.asm index 45df90e923e..77bafeb9cd1 100644 --- a/sway-core/tests/ir_to_asm/enum_in_storage_write.asm +++ b/sway-core/tests/ir_to_asm/enum_in_storage_write.asm @@ -5,90 +5,84 @@ DATA_SECTION_OFFSET[0..32] DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is -lw $r1 $fp i73 ; load input function selector -lw $r0 data_8 ; load fn selector for comparison -eq $r0 $r1 $r0 ; function selector comparison -jnzi $r0 i11 ; jump to selected function -rvrt $zero ; revert if no selectors matched -move $r5 $sp ; save locals base register -cfei i256 ; allocate 256 bytes for all locals -lw $r0 $fp i74 ; Base register for method parameter -addi $r1 $r0 i0 ; Get address for arg s -lw $r3 $r0 i5 ; Get arg u -move $r2 $sp ; save register for temporary stack value -cfei i48 ; allocate 48 bytes for temporary struct -lw $r0 data_0 ; literal instantiation for aggregate field -sw $r2 $r0 i0 ; initialise aggregate field +lw $r0 $fp i73 ; load input function selector +lw $r1 data_8 ; load fn selector for comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i12 ; jump to selected function +movi $$tmp i123 ; special code for mismatched selector +rvrt $$tmp ; revert if no selectors matched +lw $r0 $fp i74 ; base register for method parameter +addi $r1 $r0 i0 ; get address for arg s +lw $r5 $r0 i5 ; get arg u +move $r3 $sp ; save locals base register +cfei i256 ; allocate 256 bytes for locals +lw $r2 data_0 ; literal instantiation addi $r0 $r2 i8 ; get struct field(s) 1 offset mcpi $r0 $r1 i40 ; store struct field value lw $r1 $r2 i0 ; extract_value @ 0 -addi $r0 $r5 i0 ; get offset reg for get_ptr +addi $r0 $r3 i0 ; get offset reg for get_ptr lw $r0 data_1 ; literal instantiation -addi $r4 $r5 i0 ; get store offset +addi $r4 $r3 i0 ; get store offset mcpi $r4 $r0 i32 ; store value -addi $r0 $r5 i0 ; get offset +addi $r0 $r3 i0 ; get offset sww $r0 $r1 ; single word state access addi $r2 $r2 i8 ; extract address -addi $r0 $r5 i32 ; get offset reg for get_ptr +addi $r0 $r3 i32 ; get offset reg for get_ptr lw $r1 data_2 ; literal instantiation -addi $r0 $r5 i32 ; get store offset +addi $r0 $r3 i32 ; get store offset mcpi $r0 $r1 i32 ; store value -addi $r0 $r5 i128 ; get offset reg for get_ptr -addi $r0 $r5 i128 ; get store offset +addi $r0 $r3 i128 ; get offset reg for get_ptr +addi $r0 $r3 i128 ; get store offset mcpi $r0 $r2 i64 ; store value -addi $r0 $r5 i128 ; get offset reg for get_ptr -addi $r1 $r5 i128 ; get offset -addi $r0 $r5 i32 ; get offset +addi $r0 $r3 i128 ; get offset reg for get_ptr +addi $r1 $r3 i128 ; get offset +addi $r0 $r3 i32 ; get offset swwq $r0 $r1 ; quad word state access -addi $r0 $r5 i32 ; get offset reg for get_ptr +addi $r0 $r3 i32 ; get offset reg for get_ptr lw $r1 data_3 ; literal instantiation -addi $r0 $r5 i32 ; get store offset +addi $r0 $r3 i32 ; get store offset mcpi $r0 $r1 i32 ; store value -addi $r0 $r5 i160 ; get offset reg for get_ptr -addi $r1 $r5 i160 ; get offset -addi $r0 $r5 i32 ; get offset +addi $r0 $r3 i160 ; get 
offset reg for get_ptr +addi $r1 $r3 i160 ; get offset +addi $r0 $r3 i32 ; get offset swwq $r0 $r1 ; quad word state access -move $r4 $sp ; save register for temporary stack value -cfei i48 ; allocate 48 bytes for temporary struct -lw $r0 data_4 ; literal instantiation for aggregate field -sw $r4 $r0 i0 ; initialise aggregate field -sw $r4 $r3 i5 ; insert_value @ 1 +lw $r4 data_4 ; literal instantiation +sw $r4 $r5 i5 ; insert_value @ 1 lw $r2 $r4 i0 ; extract_value @ 0 -addi $r0 $r5 i64 ; get offset reg for get_ptr +addi $r0 $r3 i64 ; get offset reg for get_ptr lw $r1 data_5 ; literal instantiation -addi $r0 $r5 i64 ; get store offset +addi $r0 $r3 i64 ; get store offset mcpi $r0 $r1 i32 ; store value -addi $r0 $r5 i64 ; get offset +addi $r0 $r3 i64 ; get offset sww $r0 $r2 ; single word state access addi $r2 $r4 i8 ; extract address -addi $r0 $r5 i96 ; get offset reg for get_ptr +addi $r0 $r3 i96 ; get offset reg for get_ptr lw $r1 data_6 ; literal instantiation -addi $r0 $r5 i96 ; get store offset +addi $r0 $r3 i96 ; get store offset mcpi $r0 $r1 i32 ; store value -addi $r0 $r5 i192 ; get offset reg for get_ptr -addi $r0 $r5 i192 ; get store offset +addi $r0 $r3 i192 ; get offset reg for get_ptr +addi $r0 $r3 i192 ; get store offset mcpi $r0 $r2 i64 ; store value -addi $r0 $r5 i192 ; get offset reg for get_ptr -addi $r1 $r5 i192 ; get offset -addi $r0 $r5 i96 ; get offset +addi $r0 $r3 i192 ; get offset reg for get_ptr +addi $r1 $r3 i192 ; get offset +addi $r0 $r3 i96 ; get offset swwq $r0 $r1 ; quad word state access -addi $r0 $r5 i96 ; get offset reg for get_ptr +addi $r0 $r3 i96 ; get offset reg for get_ptr lw $r1 data_7 ; literal instantiation -addi $r0 $r5 i96 ; get store offset +addi $r0 $r3 i96 ; get store offset mcpi $r0 $r1 i32 ; store value -addi $r0 $r5 i224 ; get offset reg for get_ptr -addi $r1 $r5 i224 ; get offset -addi $r0 $r5 i96 ; get offset +addi $r0 $r3 i224 ; get offset reg for get_ptr +addi $r1 $r3 i224 ; get offset +addi $r0 $r3 i96 ; get offset swwq $r0 $r1 ; quad word state access ret $zero ; returning unit as zero -noop ; word-alignment of data section .data: -data_0 .u64 0x00 -data_1 .b256 0xd625ff6d8e88efd7bb3476e748e5d5935618d78bfc7eedf584fe909ce0809fc3 -data_2 .b256 0xc4f29cca5a7266ecbc35c82c55dd2b0059a3db4c83a3410653ec33aded8e9840 -data_3 .b256 0xc4f29cca5a7266ecbc35c82c55dd2b0059a3db4c83a3410653ec33aded8e9841 -data_4 .u64 0x01 -data_5 .b256 0x2817e0819d6fcad797114fbcf350fa281aca33a39b0abf977797bddd69b8e7af -data_6 .b256 0x12ea9b9b05214a0d64996d259c59202b80a21415bb68b83121353e2a5925ec47 -data_7 .b256 0x12ea9b9b05214a0d64996d259c59202b80a21415bb68b83121353e2a5925ec48 -data_8 .u32 0xc1c7877c +data_0 .collection { .word 0, .word 0 } +data_1 .bytes[32] d6 25 ff 6d 8e 88 ef d7 bb 34 76 e7 48 e5 d5 93 56 18 d7 8b fc 7e ed f5 84 fe 90 9c e0 80 9f c3 .%.m.....4v.H...V....~.......... +data_2 .bytes[32] c4 f2 9c ca 5a 72 66 ec bc 35 c8 2c 55 dd 2b 00 59 a3 db 4c 83 a3 41 06 53 ec 33 ad ed 8e 98 40 ....Zrf..5.,U.+.Y..L..A.S.3....@ +data_3 .bytes[32] c4 f2 9c ca 5a 72 66 ec bc 35 c8 2c 55 dd 2b 00 59 a3 db 4c 83 a3 41 06 53 ec 33 ad ed 8e 98 41 ....Zrf..5.,U.+.Y..L..A.S.3....A +data_4 .collection { .word 1, .word 0 } +data_5 .bytes[32] 28 17 e0 81 9d 6f ca d7 97 11 4f bc f3 50 fa 28 1a ca 33 a3 9b 0a bf 97 77 97 bd dd 69 b8 e7 af (....o....O..P.(..3.....w...i... 
+data_6 .bytes[32] 12 ea 9b 9b 05 21 4a 0d 64 99 6d 25 9c 59 20 2b 80 a2 14 15 bb 68 b8 31 21 35 3e 2a 59 25 ec 47 .....!J.d.m%.Y +.....h.1!5>*Y%.G
+data_7 .bytes[32] 12 ea 9b 9b 05 21 4a 0d 64 99 6d 25 9c 59 20 2b 80 a2 14 15 bb 68 b8 31 21 35 3e 2a 59 25 ec 48 .....!J.d.m%.Y +.....h.1!5>*Y%.H
+data_8 .word 3251079036
diff --git a/sway-core/tests/ir_to_asm/enum_padding.asm b/sway-core/tests/ir_to_asm/enum_padding.asm
index 1ec87de7027..4ba4b98c939 100644
--- a/sway-core/tests/ir_to_asm/enum_padding.asm
+++ b/sway-core/tests/ir_to_asm/enum_padding.asm
@@ -5,25 +5,9 @@ DATA_SECTION_OFFSET[0..32]
 DATA_SECTION_OFFSET[32..64]
 lw $ds $is 1
 add $$ds $$ds $is
-move $r1 $sp ; save register for temporary stack value
-cfei i72 ; allocate 72 bytes for temporary struct
-lw $r0 data_0 ; literal instantiation for aggregate field
-sw $r1 $r0 i0 ; initialise aggregate field
-addi $r0 $r1 i8 ; get base pointer for union
-mcli $r0 i16 ; clear padding for union initialisation
-lw $r0 data_1 ; literal instantiation for aggregate field
-sw $r1 $r0 i3 ; initialise aggregate field
-lw $r0 data_0 ; literal instantiation for aggregate field
-sw $r1 $r0 i4 ; initialise aggregate field
-addi $r0 $r1 i40 ; get base pointer for union
-mcli $r0 i24 ; clear padding for union initialisation
-lw $r0 data_2 ; literal instantiation for aggregate field
-sw $r1 $r0 i8 ; initialise aggregate field
-lw $r0 data_3 ; loading size for RETD
+lw $r1 data_0 ; literal instantiation
+lw $r0 data_1 ; loading size for RETD
 retd $r1 $r0
-noop ; word-alignment of data section
 .data:
-data_0 .u64 0x01
-data_1 .u64 0x2a
-data_2 .u64 0x42
-data_3 .u64 0x48
+data_0 .collection { .word 1, .collection { .word 42, .collection { .word 1, .word 66 } } }
+data_1 .word 72
diff --git a/sway-core/tests/ir_to_asm/enum_struct_string.asm b/sway-core/tests/ir_to_asm/enum_struct_string.asm
index bd447c78ab3..d80ecb90983 100644
--- a/sway-core/tests/ir_to_asm/enum_struct_string.asm
+++ b/sway-core/tests/ir_to_asm/enum_struct_string.asm
@@ -6,22 +6,19 @@ DATA_SECTION_OFFSET[32..64]
 lw $ds $is 1
 add $$ds $$ds $is
 move $r4 $sp ; save locals base register
-cfei i48 ; allocate 48 bytes for all locals
-move $r3 $sp ; save register for temporary stack value
-cfei i56 ; allocate 56 bytes for temporary struct
+cfei i48 ; allocate 48 bytes for locals
+lw $r3 data_0 ; literal instantiation
 sw $r3 $zero i0 ; insert_value @ 0
-move $r2 $sp ; save register for temporary stack value
-cfei i32 ; allocate 32 bytes for temporary struct
-lw $r1 data_0 ; literal instantiation
+lw $r2 data_0 ; literal instantiation
+lw $r1 data_1 ; literal instantiation
 addi $r0 $r2 i0 ; get struct field(s) 0 offset
 mcpi $r0 $r1 i24 ; store struct field value
-lw $r0 data_1 ; literal instantiation
+lw $r0 data_2 ; literal instantiation
 sw $r2 $r0 i3 ; insert_value @ 1
-move $r1 $sp ; save register for temporary stack value
-cfei i48 ; allocate 48 bytes for temporary struct
+lw $r1 data_3 ; literal instantiation
 addi $r0 $r1 i0 ; get struct field(s) 0 offset
 mcpi $r0 $r2 i32 ; store struct field value
-lw $r0 data_2 ; literal instantiation
+lw $r0 data_4 ; literal instantiation
 sw $r1 $r0 i4 ; insert_value @ 1
 sw $r1 $zero i5 ; insert_value @ 2
 addi $r0 $r3 i8 ; get struct field(s) 1 offset
@@ -39,8 +36,9 @@ lw $r0 $r0 i4 ; extract_value @ 1
 ji i40
 move $r0 $zero ; branch to phi value
 ret $r0
-noop ; word-alignment of data section
 .data:
-data_0 .str " an odd length"
-data_1 .u64 0x14
-data_2 .u64 0x0a
+data_0 .collection { .word 0, .word 0 }
+data_1 .bytes[17] ee 82 b0 20 61 6e 20 6f 64 64 20
6c 65 6e 67 74 68 ... an odd length +data_2 .word 20 +data_3 .collection { .collection { .word 0, .word 0 }, .word 0, .word 0 } +data_4 .word 10 diff --git a/sway-core/tests/ir_to_asm/get_storage_key.asm b/sway-core/tests/ir_to_asm/get_storage_key.asm index 35b30957743..0b23ab1f0e5 100644 --- a/sway-core/tests/ir_to_asm/get_storage_key.asm +++ b/sway-core/tests/ir_to_asm/get_storage_key.asm @@ -5,24 +5,24 @@ DATA_SECTION_OFFSET[0..32] DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is -lw $r1 $fp i73 ; load input function selector -lw $r0 data_3 ; load fn selector for comparison -eq $r0 $r1 $r0 ; function selector comparison -jnzi $r0 i14 ; jump to selected function -lw $r0 data_4 ; load fn selector for comparison -eq $r0 $r1 $r0 ; function selector comparison -jnzi $r0 i18 ; jump to selected function -rvrt $zero ; revert if no selectors matched -lw $r0 data_0 ; literal instantiation -lw $r1 data_1 ; loading size for RETD -retd $r0 $r1 +lw $r0 $fp i73 ; load input function selector +lw $r1 data_3 ; load fn selector for comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i15 ; jump to selected function +lw $r1 data_4 ; load fn selector for comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i19 ; jump to selected function +movi $$tmp i123 ; special code for mismatched selector +rvrt $$tmp ; revert if no selectors matched +lw $r1 data_0 ; literal instantiation +lw $r0 data_1 ; loading size for RETD +retd $r1 $r0 lw $r1 data_2 ; literal instantiation lw $r0 data_1 ; loading size for RETD retd $r1 $r0 -noop ; word-alignment of data section .data: -data_0 .b256 0xf383b0ce51358be57daa3b725fe44acdb2d880604e367199080b4379c41bb6ed -data_1 .u64 0x20 -data_2 .b256 0xde9090cb50e71c2588c773487d1da7066d0c719849a7e58dc8b6397a25c567c0 -data_3 .u32 0x2994c98e -data_4 .u32 0xf57bdec8 +data_0 .bytes[32] f3 83 b0 ce 51 35 8b e5 7d aa 3b 72 5f e4 4a cd b2 d8 80 60 4e 36 71 99 08 0b 43 79 c4 1b b6 ed ....Q5..}.;r_.J....`N6q...Cy.... +data_1 .word 32 +data_2 .bytes[32] de 90 90 cb 50 e7 1c 25 88 c7 73 48 7d 1d a7 06 6d 0c 71 98 49 a7 e5 8d c8 b6 39 7a 25 c5 67 c0 ....P..%..sH}...m.q.I.....9z%.g. 
+data_3 .word 697616782 +data_4 .word 4118535880 diff --git a/sway-core/tests/ir_to_asm/get_storage_key.ir b/sway-core/tests/ir_to_asm/get_storage_key.ir index cd8eec038b0..11210c506e5 100644 --- a/sway-core/tests/ir_to_asm/get_storage_key.ir +++ b/sway-core/tests/ir_to_asm/get_storage_key.ir @@ -1,41 +1,28 @@ contract { - fn foo1<2994c98e>() -> b256 { - entry: - v0 = get_storage_key, !5 - br block0 - - block0: - v1 = phi(entry: v0) - ret b256 v1 - } + fn foo1<2994c98e>() -> b256, !1 { + local ptr { } __anon_0 - fn anon_0() -> b256 { entry: - v0 = get_storage_key, !1 + v0 = get_storage_key, !5 ret b256 v0 } - fn foo2() -> b256 { - entry: - v0 = get_storage_key, !6 - br block0 - - block0: - v1 = phi(entry: v0) - ret b256 v1 - } + fn foo2() -> b256, !6 { + local ptr { } __anon_0 - fn anon_1() -> b256 { entry: - v0 = get_storage_key, !3 + v0 = get_storage_key, !9 ret b256 v0 } } !0 = "/path/to/get_storage_key.sw" -!1 = span !0 72 91 -!2 = state_index 0 -!3 = span !0 72 91 -!4 = state_index 1 -!5 = (!1 !2) -!6 = (!3 !4) +!1 = span !0 287 337 +!2 = span !0 315 331 +!3 = state_index 0 +!4 = span !0 76 95 +!5 = (!2 !3 !4) +!6 = span !0 342 392 +!7 = span !0 370 386 +!8 = state_index 1 +!9 = (!7 !8 !4) diff --git a/sway-core/tests/ir_to_asm/if_expr.asm b/sway-core/tests/ir_to_asm/if_expr.asm index dfcd229c531..8f22a8352b4 100644 --- a/sway-core/tests/ir_to_asm/if_expr.asm +++ b/sway-core/tests/ir_to_asm/if_expr.asm @@ -13,5 +13,5 @@ lw $r0 data_1 ; literal instantiation ret $r0 noop ; word-alignment of data section .data: -data_0 .u64 0xf4240 -data_1 .u64 0x2a +data_0 .word 1000000 +data_1 .word 42 diff --git a/sway-core/tests/ir_to_asm/impl_ret_int.asm b/sway-core/tests/ir_to_asm/impl_ret_int.asm index 43f4bdf8058..bb9ac3dd3ea 100644 --- a/sway-core/tests/ir_to_asm/impl_ret_int.asm +++ b/sway-core/tests/ir_to_asm/impl_ret_int.asm @@ -9,4 +9,4 @@ lw $r0 data_0 ; literal instantiation ret $r0 noop ; word-alignment of data section .data: -data_0 .u64 0x2a +data_0 .word 42 diff --git a/sway-core/tests/ir_to_asm/lazy_binops.asm b/sway-core/tests/ir_to_asm/lazy_binops.asm index b4c9f5fbf2b..0b2222071cd 100644 --- a/sway-core/tests/ir_to_asm/lazy_binops.asm +++ b/sway-core/tests/ir_to_asm/lazy_binops.asm @@ -6,12 +6,10 @@ DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is move $r0 $zero ; branch to phi value -move $r0 $zero ; branch to phi value -jnzi $zero i10 -ji i11 +jnzi $zero i9 +ji i10 move $r0 $one ; branch to phi value -move $r1 $r0 ; branch to phi value -jnzi $r0 i14 +jnzi $r0 i12 move $r0 $one ; branch to phi value ret $r0 .data: diff --git a/sway-core/tests/ir_to_asm/let_reassign_while_loop.asm b/sway-core/tests/ir_to_asm/let_reassign_while_loop.asm index 9103b8ca41d..50a8d3e2d3b 100644 --- a/sway-core/tests/ir_to_asm/let_reassign_while_loop.asm +++ b/sway-core/tests/ir_to_asm/let_reassign_while_loop.asm @@ -6,7 +6,7 @@ DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is move $r2 $sp ; save locals base register -cfei i8 ; allocate 8 bytes for all locals +cfei i8 ; allocate 8 bytes for locals addi $r0 $r2 i0 ; get offset reg for get_ptr sw $r2 $one i0 ; store value addi $r0 $r2 i0 ; get offset reg for get_ptr @@ -14,12 +14,12 @@ lw $r0 $r2 i0 ; load value jnzi $r0 i14 ji i22 addi $r0 $r2 i0 ; get offset reg for get_ptr -lw $r1 $r2 i0 ; load value -jnzi $r1 i18 +lw $r0 $r2 i0 ; load value +jnzi $r0 i18 ji i19 -move $r1 $zero ; branch to phi value -addi $r0 $r2 i0 ; get offset reg for get_ptr -sw $r2 $r1 i0 ; store value +move $r0 $zero ; branch to phi value +addi $r1 $r2 i0 ; get 
offset reg for get_ptr +sw $r2 $r0 i0 ; store value ji i10 addi $r0 $r2 i0 ; get offset reg for get_ptr lw $r0 $r2 i0 ; load value diff --git a/sway-core/tests/ir_to_asm/logging.asm b/sway-core/tests/ir_to_asm/logging.asm index 8e281a15438..0ecaa25c31c 100644 --- a/sway-core/tests/ir_to_asm/logging.asm +++ b/sway-core/tests/ir_to_asm/logging.asm @@ -6,12 +6,9 @@ DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is move $r2 $sp ; save locals base register -cfei i8 ; allocate 8 bytes for all locals +cfei i8 ; allocate 8 bytes for locals addi $r0 $r2 i0 ; get offset reg for get_ptr -move $r1 $sp ; save register for temporary stack value -cfei i8 ; allocate 8 bytes for temporary struct -lw $r0 data_0 ; literal instantiation for aggregate field -sw $r1 $r0 i0 ; initialise aggregate field +lw $r1 data_0 ; literal instantiation addi $r0 $r2 i0 ; get store offset mcpi $r0 $r1 i8 ; store value lw $r1 data_1 ; literal instantiation @@ -22,9 +19,10 @@ lw $r1 data_3 ; literal instantiation lw $r0 data_4 ; loading size for LOGD logd $zero $r1 $r2 $r0 ret $zero ; returning unit as zero +noop ; word-alignment of data section .data: -data_0 .u64 0x01 -data_1 .u64 0x2a -data_2 .u64 0xf891e -data_3 .u64 0xf8923 -data_4 .u64 0x08 +data_0 .collection { .word 1 } +data_1 .word 42 +data_2 .word 1018142 +data_3 .word 1018147 +data_4 .word 8 diff --git a/sway-core/tests/ir_to_asm/mutable_struct.asm b/sway-core/tests/ir_to_asm/mutable_struct.asm index 38279e830d2..97cab1900ab 100644 --- a/sway-core/tests/ir_to_asm/mutable_struct.asm +++ b/sway-core/tests/ir_to_asm/mutable_struct.asm @@ -6,23 +6,24 @@ DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is move $r2 $sp ; save locals base register -cfei i16 ; allocate 16 bytes for all locals -move $r1 $sp ; save register for temporary stack value -cfei i16 ; allocate 16 bytes for temporary struct -lw $r0 data_0 ; literal instantiation -sw $r1 $r0 i0 ; insert_value @ 0 +cfei i16 ; allocate 16 bytes for locals +lw $r1 data_0 ; literal instantiation lw $r0 data_1 ; literal instantiation +sw $r1 $r0 i0 ; insert_value @ 0 +lw $r0 data_2 ; literal instantiation sw $r1 $r0 i1 ; insert_value @ 1 addi $r0 $r2 i0 ; get offset reg for get_ptr addi $r0 $r2 i0 ; get store offset mcpi $r0 $r1 i16 ; store value addi $r1 $r2 i0 ; get offset reg for get_ptr -lw $r0 data_2 ; literal instantiation +lw $r0 data_3 ; literal instantiation sw $r1 $r0 i0 ; insert_value @ 0 addi $r0 $r2 i0 ; get offset reg for get_ptr lw $r0 $r0 i1 ; extract_value @ 1 ret $r0 +noop ; word-alignment of data section .data: -data_0 .u64 0x28 -data_1 .u64 0x02 -data_2 .u64 0x32 +data_0 .collection { .word 0, .word 0 } +data_1 .word 40 +data_2 .word 2 +data_3 .word 50 diff --git a/sway-core/tests/ir_to_asm/nested_single_word_struct.asm b/sway-core/tests/ir_to_asm/nested_single_word_struct.asm index a7b51308a65..70e0ad99d52 100644 --- a/sway-core/tests/ir_to_asm/nested_single_word_struct.asm +++ b/sway-core/tests/ir_to_asm/nested_single_word_struct.asm @@ -5,14 +5,16 @@ DATA_SECTION_OFFSET[0..32] DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is -lw $r1 $fp i73 ; load input function selector -lw $r0 data_0 ; load fn selector for comparison -eq $r0 $r1 $r0 ; function selector comparison -jnzi $r0 i11 ; jump to selected function -rvrt $zero ; revert if no selectors matched -lw $r0 $fp i74 ; Base register for method parameter +lw $r0 $fp i73 ; load input function selector +lw $r1 data_0 ; load fn selector for comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i12 ; jump to selected 
function +movi $$tmp i123 ; special code for mismatched selector +rvrt $$tmp ; revert if no selectors matched +lw $r0 $fp i74 ; base register for method parameter addi $r0 $r0 i0 ; extract address lw $r0 $r0 i0 ; extract_value @ 0 ret $r0 +noop ; word-alignment of data section .data: -data_0 .u32 0x495d4a23 +data_0 .word 1230850595 diff --git a/sway-core/tests/ir_to_asm/ret_string_in_struct.asm b/sway-core/tests/ir_to_asm/ret_string_in_struct.asm index ac29535ef68..6511c0c40b8 100644 --- a/sway-core/tests/ir_to_asm/ret_string_in_struct.asm +++ b/sway-core/tests/ir_to_asm/ret_string_in_struct.asm @@ -5,33 +5,32 @@ DATA_SECTION_OFFSET[0..32] DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is -lw $r1 $fp i73 ; load input function selector -lw $r0 data_4 ; load fn selector for comparison -eq $r0 $r1 $r0 ; function selector comparison -jnzi $r0 i14 ; jump to selected function -lw $r0 data_5 ; load fn selector for comparison -eq $r0 $r1 $r0 ; function selector comparison -jnzi $r0 i22 ; jump to selected function -rvrt $zero ; revert if no selectors matched -move $r2 $sp ; save register for temporary stack value -cfei i8 ; allocate 8 bytes for temporary struct -lw $r1 data_0 ; literal instantiation +lw $r0 $fp i73 ; load input function selector +lw $r1 data_5 ; load fn selector for comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i15 ; jump to selected function +lw $r1 data_6 ; load fn selector for comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i23 ; jump to selected function +movi $$tmp i123 ; special code for mismatched selector +rvrt $$tmp ; revert if no selectors matched +lw $r2 data_0 ; literal instantiation +lw $r1 data_1 ; literal instantiation addi $r0 $r2 i0 ; get struct field(s) 0 offset mcpi $r0 $r1 i8 ; store struct field value -lw $r0 data_1 ; loading size for RETD -retd $r2 $r0 -move $r2 $sp ; save register for temporary stack value -cfei i16 ; allocate 16 bytes for temporary struct -lw $r1 data_2 ; literal instantiation -addi $r0 $r2 i0 ; get struct field(s) 0 offset -mcpi $r0 $r1 i16 ; store struct field value -lw $r0 data_3 ; loading size for RETD +lw $r0 data_2 ; loading size for RETD retd $r2 $r0 -noop ; word-alignment of data section +lw $r1 data_0 ; literal instantiation +lw $r0 data_3 ; literal instantiation +addi $r2 $r1 i0 ; get struct field(s) 0 offset +mcpi $r2 $r0 i16 ; store struct field value +lw $r0 data_4 ; loading size for RETD +retd $r1 $r0 .data: -data_0 .str "foobar0" -data_1 .u64 0x08 -data_2 .str "foobarbaz" -data_3 .u64 0x10 -data_4 .u32 0x4a13be00 -data_5 .u32 0x29ea7974 +data_0 .collection { .word 0 } +data_1 .bytes[7] 66 6f 6f 62 61 72 30 foobar0 +data_2 .word 8 +data_3 .bytes[9] 66 6f 6f 62 61 72 62 61 7a foobarbaz +data_4 .word 16 +data_5 .word 1242807808 +data_6 .word 703232372 diff --git a/sway-core/tests/ir_to_asm/simple_array.asm b/sway-core/tests/ir_to_asm/simple_array.asm index 0cff8523798..5b5430660c5 100644 --- a/sway-core/tests/ir_to_asm/simple_array.asm +++ b/sway-core/tests/ir_to_asm/simple_array.asm @@ -6,16 +6,15 @@ DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is move $r2 $sp ; save locals base register -cfei i24 ; allocate 24 bytes for all locals -move $r1 $sp ; save register for temporary stack value -cfei i24 ; allocate 24 bytes for temporary array +cfei i24 ; allocate 24 bytes for locals +lw $r1 data_0 ; literal instantiation muli $r0 $zero i8 ; insert_element relative offset add $r0 $r1 $r0 ; insert_element absolute offset sw $r0 $zero i0 ; insert_element muli $r0 $one i8 ; 
insert_element relative offset add $r0 $r1 $r0 ; insert_element absolute offset sw $r0 $one i0 ; insert_element -lw $r0 data_0 ; literal instantiation +lw $r0 data_1 ; literal instantiation muli $r0 $r0 i8 ; insert_element relative offset add $r0 $r1 $r0 ; insert_element absolute offset sw $r0 $zero i0 ; insert_element @@ -27,6 +26,6 @@ muli $r0 $one i8 ; extract_element relative offset add $r0 $r1 $r0 ; extract_element absolute offset lw $r0 $r0 i0 ; extract_element ret $r0 -noop ; word-alignment of data section .data: -data_0 .u64 0x02 +data_0 .collection { .word 0, .word 0, .word 0 } +data_1 .word 2 diff --git a/sway-core/tests/ir_to_asm/simple_contract.asm b/sway-core/tests/ir_to_asm/simple_contract.asm index 59a8fd5e783..d4cf6bbf42b 100644 --- a/sway-core/tests/ir_to_asm/simple_contract.asm +++ b/sway-core/tests/ir_to_asm/simple_contract.asm @@ -6,35 +6,36 @@ DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is lw $r0 $fp i73 ; load input function selector -lw $r1 data_2 ; load fn selector for comparison -eq $r1 $r0 $r1 ; function selector comparison -jnzi $r1 i17 ; jump to selected function lw $r1 data_3 ; load fn selector for comparison -eq $r1 $r0 $r1 ; function selector comparison -jnzi $r1 i19 ; jump to selected function +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i18 ; jump to selected function lw $r1 data_4 ; load fn selector for comparison -eq $r0 $r0 $r1 ; function selector comparison -jnzi $r0 i22 ; jump to selected function -rvrt $zero ; revert if no selectors matched -lw $r0 $fp i74 ; Base register for method parameter +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i20 ; jump to selected function +lw $r1 data_5 ; load fn selector for comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i23 ; jump to selected function +movi $$tmp i123 ; special code for mismatched selector +rvrt $$tmp ; revert if no selectors matched +lw $r0 $fp i74 ; base register for method parameter ret $r0 -lw $r1 $fp i74 ; Base register for method parameter +lw $r1 $fp i74 ; base register for method parameter lw $r0 data_0 ; loading size for RETD retd $r1 $r0 -lw $r1 $fp i74 ; Base register for method parameter -lw $r0 $r1 i0 ; Get arg val1 -addi $r2 $r1 i8 ; Get address for arg val2 -move $r1 $sp ; save register for temporary stack value -cfei i40 ; allocate 40 bytes for temporary struct -sw $r1 $r0 i0 ; insert_value @ 0 -addi $r0 $r1 i8 ; get struct field(s) 1 offset -mcpi $r0 $r2 i32 ; store struct field value -lw $r0 data_1 ; loading size for RETD -retd $r1 $r0 +lw $r0 $fp i74 ; base register for method parameter +lw $r1 $r0 i0 ; get arg val1 +addi $r0 $r0 i8 ; get address for arg val2 +lw $r2 data_1 ; literal instantiation +sw $r2 $r1 i0 ; insert_value @ 0 +addi $r1 $r2 i8 ; get struct field(s) 1 offset +mcpi $r1 $r0 i32 ; store struct field value +lw $r0 data_2 ; loading size for RETD +retd $r2 $r0 noop ; word-alignment of data section .data: -data_0 .u64 0x20 -data_1 .u64 0x28 -data_2 .u32 0x9890aef4 -data_3 .u32 0x42123b96 -data_4 .u32 0xfc62d029 +data_0 .word 32 +data_1 .collection { .word 0, .word 0 } +data_2 .word 40 +data_3 .word 2559618804 +data_4 .word 1108491158 +data_5 .word 4234334249 diff --git a/sway-core/tests/ir_to_asm/simple_contract_call.asm b/sway-core/tests/ir_to_asm/simple_contract_call.asm index 3ac7f43ca56..7062d961fc4 100644 --- a/sway-core/tests/ir_to_asm/simple_contract_call.asm +++ b/sway-core/tests/ir_to_asm/simple_contract_call.asm @@ -6,77 +6,76 @@ DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is move $r3 $sp ; 
save locals base register -cfei i160 ; allocate 160 bytes for all locals +cfei i160 ; allocate 160 bytes for locals addi $r1 $r3 i80 ; get offset reg for get_ptr lw $r0 data_0 ; literal instantiation sw $r1 $r0 i0 ; insert_value @ 0 -move $r2 $sp ; save register for temporary stack value -cfei i48 ; allocate 48 bytes for temporary struct -lw $r1 data_1 ; literal instantiation +lw $r2 data_1 ; literal instantiation +lw $r1 data_2 ; literal instantiation addi $r0 $r2 i0 ; get struct field(s) 0 offset mcpi $r0 $r1 i32 ; store struct field value -lw $r0 data_2 ; literal instantiation +lw $r0 data_3 ; literal instantiation sw $r2 $r0 i4 ; insert_value @ 1 addi $r0 $r3 i80 ; get offset reg for get_ptr sw $r2 $r0 i5 ; insert_value @ 2 -lw $r1 data_3 ; literal instantiation lw $r0 data_4 ; literal instantiation -call $r2 $zero $r1 $r0 ; call external contract -move $r1 $ret +lw $r1 data_5 ; literal instantiation +call $r2 $zero $r0 $r1 ; call external contract +move $r1 $ret ; save call result addi $r0 $r3 i0 ; get offset reg for get_ptr sw $r3 $r1 i0 ; store value addi $r0 $r3 i8 ; get offset reg for get_ptr -lw $r1 data_5 ; literal instantiation +lw $r1 data_6 ; literal instantiation addi $r0 $r0 i0 ; get struct field(s) 0 offset mcpi $r0 $r1 i32 ; store struct field value -move $r2 $sp ; save register for temporary stack value -cfei i48 ; allocate 48 bytes for temporary struct -lw $r1 data_1 ; literal instantiation +lw $r2 data_1 ; literal instantiation +lw $r1 data_2 ; literal instantiation addi $r0 $r2 i0 ; get struct field(s) 0 offset mcpi $r0 $r1 i32 ; store struct field value -lw $r0 data_6 ; literal instantiation +lw $r0 data_7 ; literal instantiation sw $r2 $r0 i4 ; insert_value @ 1 addi $r0 $r3 i8 ; get offset reg for get_ptr sw $r2 $r0 i5 ; insert_value @ 2 -lw $r1 data_3 ; literal instantiation -lw $r0 data_7 ; literal instantiation +lw $r1 data_4 ; literal instantiation +lw $r0 data_8 ; literal instantiation call $r2 $zero $r1 $r0 ; call external contract -move $r1 $ret +move $r1 $ret ; save call result addi $r0 $r3 i88 ; get offset reg for get_ptr addi $r0 $r3 i88 ; get store offset mcpi $r0 $r1 i32 ; store value addi $r2 $r3 i40 ; get offset reg for get_ptr -lw $r0 data_8 ; literal instantiation +lw $r0 data_9 ; literal instantiation sw $r2 $r0 i0 ; insert_value @ 0 -lw $r1 data_9 ; literal instantiation +lw $r1 data_10 ; literal instantiation addi $r0 $r2 i8 ; get struct field(s) 1 offset mcpi $r0 $r1 i32 ; store struct field value -move $r2 $sp ; save register for temporary stack value -cfei i48 ; allocate 48 bytes for temporary struct -lw $r1 data_1 ; literal instantiation +lw $r2 data_1 ; literal instantiation +lw $r1 data_2 ; literal instantiation addi $r0 $r2 i0 ; get struct field(s) 0 offset mcpi $r0 $r1 i32 ; store struct field value -lw $r0 data_10 ; literal instantiation +lw $r0 data_11 ; literal instantiation sw $r2 $r0 i4 ; insert_value @ 1 addi $r0 $r3 i40 ; get offset reg for get_ptr sw $r2 $r0 i5 ; insert_value @ 2 move $r1 $cgas ; move register into abi function -lw $r0 data_3 ; literal instantiation +lw $r0 data_4 ; literal instantiation call $r2 $zero $r0 $r1 ; call external contract -move $r1 $ret +move $r1 $ret ; save call result addi $r0 $r3 i120 ; get offset reg for get_ptr addi $r0 $r3 i120 ; get store offset mcpi $r0 $r1 i40 ; store value ret $zero +noop ; word-alignment of data section .data: -data_0 .u64 0x457 -data_1 .b256 0x0c1c50c2bf5ba4bb351b4249a2f5e7d86556fcb4a6ae90465ff6c86126eeb3c0 -data_2 .u64 0x9890aef4 -data_3 .b256 
0x0000000000000000000000000000000000000000000000000000000000000000 -data_4 .u64 0x2710 -data_5 .b256 0x3333333333333333333333333333333333333333333333333333333333333333 -data_6 .u64 0x42123b96 -data_7 .u64 0x4e20 -data_8 .u64 0x15b3 -data_9 .b256 0x5555555555555555555555555555555555555555555555555555555555555555 -data_10 .u64 0xfc62d029 +data_0 .word 1111 +data_1 .collection { .word 0, .word 0, .word 0 } +data_2 .bytes[32] 0c 1c 50 c2 bf 5b a4 bb 35 1b 42 49 a2 f5 e7 d8 65 56 fc b4 a6 ae 90 46 5f f6 c8 61 26 ee b3 c0 ..P..[..5.BI....eV.....F_..a&... +data_3 .word 2559618804 +data_4 .bytes[32] 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................................ +data_5 .word 10000 +data_6 .bytes[32] 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33 33333333333333333333333333333333 +data_7 .word 1108491158 +data_8 .word 20000 +data_9 .word 5555 +data_10 .bytes[32] 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 55 UUUUUUUUUUUUUUUUUUUUUUUUUUUUUUUU +data_11 .word 4234334249 diff --git a/sway-core/tests/ir_to_asm/simple_enum.asm b/sway-core/tests/ir_to_asm/simple_enum.asm index e9c0e688fa7..f1c6d3a0a99 100644 --- a/sway-core/tests/ir_to_asm/simple_enum.asm +++ b/sway-core/tests/ir_to_asm/simple_enum.asm @@ -6,22 +6,21 @@ DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is move $r2 $sp ; save locals base register -cfei i16 ; allocate 16 bytes for all locals -move $r1 $sp ; save register for temporary stack value -cfei i16 ; allocate 16 bytes for temporary struct +cfei i16 ; allocate 16 bytes for locals +lw $r1 data_0 ; literal instantiation sw $r1 $one i0 ; insert_value @ 0 addi $r0 $r2 i0 ; get offset reg for get_ptr addi $r0 $r2 i0 ; get store offset mcpi $r0 $r1 i16 ; store value addi $r0 $r2 i0 ; get offset reg for get_ptr -move $r1 $sp ; save register for temporary stack value -cfei i16 ; allocate 16 bytes for temporary struct -lw $r0 data_0 ; literal instantiation -sw $r1 $r0 i0 ; insert_value @ 0 +lw $r1 data_0 ; literal instantiation lw $r0 data_1 ; literal instantiation +sw $r1 $r0 i0 ; insert_value @ 0 +lw $r0 data_2 ; literal instantiation sw $r1 $r0 i1 ; insert_value @ 1 ret $zero ; returning unit as zero noop ; word-alignment of data section .data: -data_0 .u64 0x02 -data_1 .u64 0x03 +data_0 .collection { .word 0, .word 0 } +data_1 .word 2 +data_2 .word 3 diff --git a/sway-core/tests/ir_to_asm/simple_if_let.asm b/sway-core/tests/ir_to_asm/simple_if_let.asm index cb707728199..fce45f0cf39 100644 --- a/sway-core/tests/ir_to_asm/simple_if_let.asm +++ b/sway-core/tests/ir_to_asm/simple_if_let.asm @@ -6,9 +6,8 @@ DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is move $r2 $sp ; save locals base register -cfei i24 ; allocate 24 bytes for all locals -move $r1 $sp ; save register for temporary stack value -cfei i16 ; allocate 16 bytes for temporary struct +cfei i24 ; allocate 24 bytes for locals +lw $r1 data_0 ; literal instantiation sw $r1 $zero i0 ; insert_value @ 0 sw $r1 $one i1 ; insert_value @ 1 addi $r0 $r2 i8 ; get offset reg for get_ptr @@ -27,5 +26,5 @@ lw $r0 $r2 i0 ; load value ji i27 move $r0 $zero ; branch to phi value ret $r0 -noop ; word-alignment of data section .data: +data_0 .collection { .word 0, .word 0 } diff --git a/sway-core/tests/ir_to_asm/simple_struct.asm b/sway-core/tests/ir_to_asm/simple_struct.asm index 7e12df97fbd..abc8608b796 100644 --- a/sway-core/tests/ir_to_asm/simple_struct.asm +++ 
b/sway-core/tests/ir_to_asm/simple_struct.asm @@ -6,12 +6,11 @@ DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is move $r2 $sp ; save locals base register -cfei i16 ; allocate 16 bytes for all locals -move $r1 $sp ; save register for temporary stack value -cfei i16 ; allocate 16 bytes for temporary struct -lw $r0 data_0 ; literal instantiation -sw $r1 $r0 i0 ; insert_value @ 0 +cfei i16 ; allocate 16 bytes for locals +lw $r1 data_0 ; literal instantiation lw $r0 data_1 ; literal instantiation +sw $r1 $r0 i0 ; insert_value @ 0 +lw $r0 data_2 ; literal instantiation sw $r1 $r0 i1 ; insert_value @ 1 addi $r0 $r2 i0 ; get offset reg for get_ptr addi $r0 $r2 i0 ; get store offset @@ -19,7 +18,7 @@ mcpi $r0 $r1 i16 ; store value addi $r0 $r2 i0 ; get offset reg for get_ptr lw $r0 $r0 i0 ; extract_value @ 0 ret $r0 -noop ; word-alignment of data section .data: -data_0 .u64 0x28 -data_1 .u64 0x02 +data_0 .collection { .word 0, .word 0 } +data_1 .word 40 +data_2 .word 2 diff --git a/sway-core/tests/ir_to_asm/storage_load.asm b/sway-core/tests/ir_to_asm/storage_load.asm index ce5c54d4b91..86cf2a05aa9 100644 --- a/sway-core/tests/ir_to_asm/storage_load.asm +++ b/sway-core/tests/ir_to_asm/storage_load.asm @@ -5,16 +5,17 @@ DATA_SECTION_OFFSET[0..32] DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is -lw $r1 $fp i73 ; load input function selector -lw $r0 data_3 ; load fn selector for comparison -eq $r0 $r1 $r0 ; function selector comparison -jnzi $r0 i14 ; jump to selected function -lw $r0 data_4 ; load fn selector for comparison -eq $r0 $r1 $r0 ; function selector comparison -jnzi $r0 i28 ; jump to selected function -rvrt $zero ; revert if no selectors matched +lw $r0 $fp i73 ; load input function selector +lw $r1 data_3 ; load fn selector for comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i15 ; jump to selected function +lw $r1 data_4 ; load fn selector for comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i29 ; jump to selected function +movi $$tmp i123 ; special code for mismatched selector +rvrt $$tmp ; revert if no selectors matched move $r2 $sp ; save locals base register -cfei i40 ; allocate 40 bytes for all locals +cfei i40 ; allocate 40 bytes for locals addi $r0 $r2 i0 ; get offset reg for get_ptr lw $r1 data_0 ; literal instantiation addi $r0 $r2 i0 ; get store offset @@ -26,23 +27,24 @@ sw $r2 $r0 i4 ; store value addi $r0 $r2 i32 ; get offset reg for get_ptr lw $r0 $r2 i4 ; load value ret $r0 -move $r0 $sp ; save locals base register -cfei i64 ; allocate 64 bytes for all locals -addi $r1 $r0 i0 ; get offset reg for get_ptr -lw $r2 data_1 ; literal instantiation -addi $r1 $r0 i0 ; get store offset -mcpi $r1 $r2 i32 ; store value -addi $r1 $r0 i32 ; get offset reg for get_ptr -addi $r2 $r0 i32 ; get offset -addi $r1 $r0 i0 ; get offset -srwq $r2 $r1 ; quad word state access -addi $r1 $r0 i32 ; get offset reg for get_ptr -addi $r1 $r0 i32 ; load address +move $r2 $sp ; save locals base register +cfei i64 ; allocate 64 bytes for locals +addi $r0 $r2 i0 ; get offset reg for get_ptr +lw $r1 data_1 ; literal instantiation +addi $r0 $r2 i0 ; get store offset +mcpi $r0 $r1 i32 ; store value +addi $r0 $r2 i32 ; get offset reg for get_ptr +addi $r1 $r2 i32 ; get offset +addi $r0 $r2 i0 ; get offset +srwq $r1 $r0 ; quad word state access +addi $r0 $r2 i32 ; get offset reg for get_ptr +addi $r1 $r2 i32 ; load address lw $r0 data_2 ; loading size for RETD retd $r1 $r0 +noop ; word-alignment of data section .data: -data_0 .b256 
0x7fbd1192666bfac3767b890bd4d048c940879d316071e20c7c8c81bce2ca41c5 -data_1 .b256 0xa15d6d36b54df993ed1fbe4544a45d4c4f70d81b4229861dfde0e20eb652202c -data_2 .u64 0x20 -data_3 .u32 0x8e277065 -data_4 .u32 0x449e8e93 +data_0 .bytes[32] 7f bd 11 92 66 6b fa c3 76 7b 89 0b d4 d0 48 c9 40 87 9d 31 60 71 e2 0c 7c 8c 81 bc e2 ca 41 c5 ....fk..v{....H.@..1`q..|.....A. +data_1 .bytes[32] a1 5d 6d 36 b5 4d f9 93 ed 1f be 45 44 a4 5d 4c 4f 70 d8 1b 42 29 86 1d fd e0 e2 0e b6 52 20 2c .]m6.M.....ED.]LOp..B).......R , +data_2 .word 32 +data_3 .word 2384949349 +data_4 .word 1151241875 diff --git a/sway-core/tests/ir_to_asm/storage_store.asm b/sway-core/tests/ir_to_asm/storage_store.asm index f695d8198b5..34240261441 100644 --- a/sway-core/tests/ir_to_asm/storage_store.asm +++ b/sway-core/tests/ir_to_asm/storage_store.asm @@ -5,16 +5,17 @@ DATA_SECTION_OFFSET[0..32] DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is -lw $r1 $fp i73 ; load input function selector -lw $r0 data_3 ; load fn selector for comparison -eq $r0 $r1 $r0 ; function selector comparison -jnzi $r0 i14 ; jump to selected function -lw $r0 data_4 ; load fn selector for comparison -eq $r0 $r1 $r0 ; function selector comparison -jnzi $r0 i26 ; jump to selected function -rvrt $zero ; revert if no selectors matched +lw $r0 $fp i73 ; load input function selector +lw $r1 data_3 ; load fn selector for comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i15 ; jump to selected function +lw $r1 data_4 ; load fn selector for comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i27 ; jump to selected function +movi $$tmp i123 ; special code for mismatched selector +rvrt $$tmp ; revert if no selectors matched move $r2 $sp ; save locals base register -cfei i40 ; allocate 40 bytes for all locals +cfei i40 ; allocate 40 bytes for locals addi $r0 $r2 i32 ; get offset reg for get_ptr sw $r2 $zero i4 ; store value addi $r0 $r2 i0 ; get offset reg for get_ptr @@ -24,25 +25,24 @@ mcpi $r0 $r1 i32 ; store value addi $r0 $r2 i0 ; get offset sww $r0 $zero ; single word state access ret $zero ; returning unit as zero -move $r2 $sp ; save locals base register -cfei i64 ; allocate 64 bytes for all locals -addi $r0 $r2 i32 ; get offset reg for get_ptr -lw $r1 data_1 ; literal instantiation -addi $r0 $r2 i32 ; get store offset -mcpi $r0 $r1 i32 ; store value -addi $r0 $r2 i0 ; get offset reg for get_ptr -lw $r1 data_2 ; literal instantiation -addi $r0 $r2 i0 ; get store offset -mcpi $r0 $r1 i32 ; store value -addi $r0 $r2 i32 ; get offset reg for get_ptr -addi $r1 $r2 i32 ; get offset -addi $r0 $r2 i0 ; get offset -swwq $r0 $r1 ; quad word state access +move $r1 $sp ; save locals base register +cfei i64 ; allocate 64 bytes for locals +addi $r0 $r1 i32 ; get offset reg for get_ptr +lw $r2 data_1 ; literal instantiation +addi $r0 $r1 i32 ; get store offset +mcpi $r0 $r2 i32 ; store value +addi $r0 $r1 i0 ; get offset reg for get_ptr +lw $r2 data_2 ; literal instantiation +addi $r0 $r1 i0 ; get store offset +mcpi $r0 $r2 i32 ; store value +addi $r0 $r1 i32 ; get offset reg for get_ptr +addi $r2 $r1 i32 ; get offset +addi $r0 $r1 i0 ; get offset +swwq $r0 $r2 ; quad word state access ret $zero ; returning unit as zero -noop ; word-alignment of data section .data: -data_0 .b256 0x7fbd1192666bfac3767b890bd4d048c940879d316071e20c7c8c81bce2ca41c5 -data_1 .b256 0x0000000000000000000000000000000000000000000000000000000000000000 -data_2 .b256 0xa15d6d36b54df993ed1fbe4544a45d4c4f70d81b4229861dfde0e20eb652202c -data_3 .u32 0x1b9b478f -data_4 
.u32 0x858a3d18 +data_0 .bytes[32] 7f bd 11 92 66 6b fa c3 76 7b 89 0b d4 d0 48 c9 40 87 9d 31 60 71 e2 0c 7c 8c 81 bc e2 ca 41 c5 ....fk..v{....H.@..1`q..|.....A. +data_1 .bytes[32] 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................................ +data_2 .bytes[32] a1 5d 6d 36 b5 4d f9 93 ed 1f be 45 44 a4 5d 4c 4f 70 d8 1b 42 29 86 1d fd e0 e2 0e b6 52 20 2c .]m6.M.....ED.]LOp..B).......R , +data_3 .word 463161231 +data_4 .word 2240429336 diff --git a/sway-core/tests/ir_to_asm/storage_store_load_intrinsics.asm b/sway-core/tests/ir_to_asm/storage_store_load_intrinsics.asm index 6147345d266..4b31f1f926f 100644 --- a/sway-core/tests/ir_to_asm/storage_store_load_intrinsics.asm +++ b/sway-core/tests/ir_to_asm/storage_store_load_intrinsics.asm @@ -5,42 +5,43 @@ DATA_SECTION_OFFSET[0..32] DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is -lw $r1 $fp i73 ; load input function selector -lw $r0 data_13 ; load fn selector for comparison -eq $r0 $r1 $r0 ; function selector comparison -jnzi $r0 i11 ; jump to selected function -rvrt $zero ; revert if no selectors matched +lw $r0 $fp i73 ; load input function selector +lw $r1 data_7 ; load fn selector for comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i12 ; jump to selected function +movi $$tmp i123 ; special code for mismatched selector +rvrt $$tmp ; revert if no selectors matched move $r4 $sp ; save locals base register -cfei i288 ; allocate 288 bytes for all locals +cfei i288 ; allocate 288 bytes for locals lw $r0 data_0 ; literal instantiation addi $r1 $r4 i240 ; get offset reg for get_ptr addi $r1 $r4 i240 ; get store offset mcpi $r1 $r0 i32 ; store value eq $r0 $zero $zero ; asm block -jnzi $r0 i21 -ji i31 +jnzi $r0 i22 +ji i32 addi $r0 $r4 i240 ; get offset reg for get_ptr -addi $r3 $r4 i240 ; load address +addi $r2 $r4 i240 ; load address lw $r1 data_1 ; literal instantiation lw $r0 data_2 ; literal instantiation -move $r2 $sp ; asm block +move $r3 $sp ; asm block cfei i8 ; asm block -sw $r2 $r1 i0 ; asm block -s256 $r3 $r2 $r0 ; asm block +sw $r3 $r1 i0 ; asm block +s256 $r2 $r3 $r0 ; asm block cfsi i8 ; asm block -ji i40 +ji i41 addi $r0 $r4 i272 ; get offset reg for get_ptr lw $r0 data_2 ; literal instantiation sw $r4 $r0 i34 ; store value addi $r0 $r4 i240 ; get offset reg for get_ptr -addi $r3 $r4 i240 ; load address +addi $r2 $r4 i240 ; load address addi $r0 $r4 i272 ; get offset reg for get_ptr lw $r1 $r4 i34 ; load value lw $r0 data_1 ; literal instantiation -s256 $r3 $r0 $r1 ; asm block +s256 $r2 $r0 $r1 ; asm block addi $r0 $r4 i0 ; get offset reg for get_ptr addi $r0 $r4 i0 ; get store offset -mcpi $r0 $r3 i32 ; store value +mcpi $r0 $r2 i32 ; store value addi $r0 $r4 i280 ; get offset reg for get_ptr lw $r0 data_3 ; literal instantiation sw $r4 $r0 i35 ; store value @@ -64,37 +65,16 @@ addi $r0 $r4 i280 ; get offset reg for get_ptr lw $r0 $r4 i35 ; load value eq $r0 $r1 $r0 eq $r0 $r0 $zero ; asm block -jnzi $r0 i68 +jnzi $r0 i69 ji i71 rvrt $zero ; asm block -move $r0 $zero ; branch to phi value -ji i72 -move $r0 $zero ; branch to phi value -move $r0 $zero ; branch to phi value +ji i71 addi $r0 $r4 i160 ; get offset reg for get_ptr -move $r1 $sp ; save register for temporary stack value -cfei i32 ; allocate 32 bytes for temporary struct -lw $r0 data_4 ; literal instantiation for aggregate field -sw $r1 $r0 i0 ; initialise aggregate field -lw $r0 data_5 ; literal instantiation for aggregate field -sw $r1 $r0 i1 ; initialise aggregate field 
-lw $r0 data_6 ; literal instantiation for aggregate field -sw $r1 $r0 i2 ; initialise aggregate field -lw $r0 data_7 ; literal instantiation for aggregate field -sw $r1 $r0 i3 ; initialise aggregate field +lw $r1 data_4 ; literal instantiation addi $r0 $r4 i160 ; get store offset mcpi $r0 $r1 i32 ; store value addi $r0 $r4 i200 ; get offset reg for get_ptr -move $r1 $sp ; save register for temporary stack value -cfei i32 ; allocate 32 bytes for temporary struct -lw $r0 data_8 ; literal instantiation for aggregate field -sw $r1 $r0 i0 ; initialise aggregate field -lw $r0 data_9 ; literal instantiation for aggregate field -sw $r1 $r0 i1 ; initialise aggregate field -lw $r0 data_10 ; literal instantiation for aggregate field -sw $r1 $r0 i2 ; initialise aggregate field -lw $r0 data_11 ; literal instantiation for aggregate field -sw $r1 $r0 i3 ; initialise aggregate field +lw $r1 data_5 ; literal instantiation addi $r0 $r4 i200 ; get store offset mcpi $r0 $r1 i32 ; store value addi $r1 $r4 i160 ; get offset reg for get_ptr @@ -126,50 +106,190 @@ lw $r1 $r0 i0 ; extract_value @ 0 addi $r0 $r4 i200 ; get offset reg for get_ptr lw $r0 $r0 i0 ; extract_value @ 0 eq $r0 $r1 $r0 -jnzi $r0 i130 -ji i135 +jnzi $r0 i112 +ji i117 addi $r0 $r4 i160 ; get offset reg for get_ptr lw $r1 $r0 i1 ; extract_value @ 1 addi $r0 $r4 i200 ; get offset reg for get_ptr lw $r0 $r0 i1 ; extract_value @ 1 eq $r0 $r1 $r0 -jnzi $r0 i137 -ji i142 +jnzi $r0 i119 +ji i124 addi $r0 $r4 i160 ; get offset reg for get_ptr lw $r1 $r0 i2 ; extract_value @ 2 addi $r0 $r4 i200 ; get offset reg for get_ptr lw $r0 $r0 i2 ; extract_value @ 2 eq $r0 $r1 $r0 -jnzi $r0 i144 -ji i149 +jnzi $r0 i126 +ji i131 addi $r0 $r4 i160 ; get offset reg for get_ptr lw $r1 $r0 i3 ; extract_value @ 3 addi $r0 $r4 i200 ; get offset reg for get_ptr lw $r0 $r0 i3 ; extract_value @ 3 eq $r0 $r1 $r0 eq $r0 $r0 $zero ; asm block -jnzi $r0 i152 -ji i155 +jnzi $r0 i134 +ji i136 rvrt $zero ; asm block -move $r0 $zero ; branch to phi value -ji i156 -move $r0 $zero ; branch to phi value -move $r0 $zero ; branch to phi value -lw $r0 data_12 ; literal instantiation +ji i136 +lw $r0 data_6 ; literal instantiation ret $r0 -noop ; word-alignment of data section +move $$tmp $sp ; save base stack value +cfei i16 ; reserve space for saved registers +sw $$tmp $r0 i0 ; save $r0 +sw $$tmp $r1 i1 ; save $r1 +move $r0 $$arg0 ; save arg 0 +move $r1 $$reta ; save reta +eq $r0 $r0 $zero ; asm block +move $$retv $r0 ; set return value +move $$reta $r1 ; restore reta +subi $$tmp $sp i16 ; save base stack value +lw $r0 $$tmp i0 ; restore $r0 +lw $r1 $$tmp i1 ; restore $r1 +cfsi i16 ; recover space from saved registers +jmp $$reta ; return from call +move $$tmp $sp ; save base stack value +cfei i16 ; reserve space for saved registers +sw $$tmp $r0 i0 ; save $r0 +sw $$tmp $r1 i1 ; save $r1 +move $r0 $$arg0 ; save arg 0 +move $r1 $$reta ; save reta +move $$arg0 $r0 ; pass arg 0 +movi $$reta i161 ; set new return addr +ji i175 ; call not_4 +move $r0 $$retv ; copy the return value +jnzi $r0 i164 +ji i168 +move $$arg0 $zero ; pass arg 0 +movi $$reta i167 ; set new return addr +ji i189 ; call revert_5 +ji i168 +move $$retv $zero ; set return value +move $$reta $r1 ; restore reta +subi $$tmp $sp i16 ; save base stack value +lw $r0 $$tmp i0 ; restore $r0 +lw $r1 $$tmp i1 ; restore $r1 +cfsi i16 ; recover space from saved registers +jmp $$reta ; return from call +move $$tmp $sp ; save base stack value +cfei i16 ; reserve space for saved registers +sw $$tmp $r0 i0 ; save $r0 +sw $$tmp $r1 i1 ; 
save $r1 +move $r0 $$arg0 ; save arg 0 +move $r1 $$reta ; save reta +eq $r0 $r0 $zero ; asm block +move $$retv $r0 ; set return value +move $$reta $r1 ; restore reta +subi $$tmp $sp i16 ; save base stack value +lw $r0 $$tmp i0 ; restore $r0 +lw $r1 $$tmp i1 ; restore $r1 +cfsi i16 ; recover space from saved registers +jmp $$reta ; return from call +move $$tmp $sp ; save base stack value +cfei i16 ; reserve space for saved registers +sw $$tmp $r0 i0 ; save $r0 +sw $$tmp $r1 i1 ; save $r1 +move $r1 $$arg0 ; save arg 0 +move $r0 $$reta ; save reta +rvrt $r1 ; asm block +move $$retv $zero ; set return value +move $$reta $r0 ; restore reta +subi $$tmp $sp i16 ; save base stack value +lw $r0 $$tmp i0 ; restore $r0 +lw $r1 $$tmp i1 ; restore $r1 +cfsi i16 ; recover space from saved registers +jmp $$reta ; return from call +move $$tmp $sp ; save base stack value +cfei i24 ; reserve space for saved registers +sw $$tmp $r0 i0 ; save $r0 +sw $$tmp $r1 i1 ; save $r1 +sw $$tmp $r2 i2 ; save $r2 +move $r2 $$arg0 ; save arg 0 +move $r0 $$arg1 ; save arg 1 +move $r1 $$reta ; save reta +eq $r0 $r2 $r0 +move $$retv $r0 ; set return value +move $$reta $r1 ; restore reta +subi $$tmp $sp i24 ; save base stack value +lw $r0 $$tmp i0 ; restore $r0 +lw $r1 $$tmp i1 ; restore $r1 +lw $r2 $$tmp i2 ; restore $r2 +cfsi i24 ; recover space from saved registers +jmp $$reta ; return from call +move $$tmp $sp ; save base stack value +cfei i24 ; reserve space for saved registers +sw $$tmp $r0 i0 ; save $r0 +sw $$tmp $r1 i1 ; save $r1 +sw $$tmp $r2 i2 ; save $r2 +move $r2 $$arg0 ; save arg 0 +move $r0 $$arg1 ; save arg 1 +move $r1 $$reta ; save reta +eq $r0 $r2 $r0 +move $$retv $r0 ; set return value +move $$reta $r1 ; restore reta +subi $$tmp $sp i24 ; save base stack value +lw $r0 $$tmp i0 ; restore $r0 +lw $r1 $$tmp i1 ; restore $r1 +lw $r2 $$tmp i2 ; restore $r2 +cfsi i24 ; recover space from saved registers +jmp $$reta ; return from call +move $$tmp $sp ; save base stack value +cfei i24 ; reserve space for saved registers +sw $$tmp $r0 i0 ; save $r0 +sw $$tmp $r1 i1 ; save $r1 +sw $$tmp $r2 i2 ; save $r2 +move $r2 $$arg0 ; save arg 0 +move $r0 $$arg1 ; save arg 1 +move $r1 $$reta ; save reta +eq $r0 $r2 $r0 +move $$retv $r0 ; set return value +move $$reta $r1 ; restore reta +subi $$tmp $sp i24 ; save base stack value +lw $r0 $$tmp i0 ; restore $r0 +lw $r1 $$tmp i1 ; restore $r1 +lw $r2 $$tmp i2 ; restore $r2 +cfsi i24 ; recover space from saved registers +jmp $$reta ; return from call +move $$tmp $sp ; save base stack value +cfei i24 ; reserve space for saved registers +sw $$tmp $r0 i0 ; save $r0 +sw $$tmp $r1 i1 ; save $r1 +sw $$tmp $r2 i2 ; save $r2 +move $r2 $$arg0 ; save arg 0 +move $r0 $$arg1 ; save arg 1 +move $r1 $$reta ; save reta +eq $r0 $r2 $r0 +move $$retv $r0 ; set return value +move $$reta $r1 ; restore reta +subi $$tmp $sp i24 ; save base stack value +lw $r0 $$tmp i0 ; restore $r0 +lw $r1 $$tmp i1 ; restore $r1 +lw $r2 $$tmp i2 ; restore $r2 +cfsi i24 ; recover space from saved registers +jmp $$reta ; return from call +move $$tmp $sp ; save base stack value +cfei i24 ; reserve space for saved registers +sw $$tmp $r0 i0 ; save $r0 +sw $$tmp $r1 i1 ; save $r1 +sw $$tmp $r2 i2 ; save $r2 +move $r2 $$arg0 ; save arg 0 +move $r0 $$arg1 ; save arg 1 +move $r1 $$reta ; save reta +eq $r0 $r2 $r0 +move $$retv $r0 ; set return value +move $$reta $r1 ; restore reta +subi $$tmp $sp i24 ; save base stack value +lw $r0 $$tmp i0 ; restore $r0 +lw $r1 $$tmp i1 ; restore $r1 +lw $r2 $$tmp i2 ; restore $r2 +cfsi i24 ; 
recover space from saved registers +jmp $$reta ; return from call .data: -data_0 .b256 0x0000000000000000000000000000000000000000000000000000000000000000 -data_1 .u64 0x16 -data_2 .u64 0x08 -data_3 .u64 0x6c -data_4 .u64 0x01 -data_5 .u64 0x02 -data_6 .u64 0x04 -data_7 .u64 0x64 -data_8 .u64 0x65 -data_9 .u64 0x79 -data_10 .u64 0xe0 -data_11 .u64 0x68 -data_12 .u64 0x80 -data_13 .u32 0xea1a0f91 +data_0 .bytes[32] 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................................ +data_1 .word 22 +data_2 .word 8 +data_3 .word 108 +data_4 .collection { .word 1, .word 2, .word 4, .word 100 } +data_5 .collection { .word 101, .word 121, .word 224, .word 104 } +data_6 .word 128 +data_7 .word 3927576465 diff --git a/sway-core/tests/ir_to_asm/storage_store_load_intrinsics.ir b/sway-core/tests/ir_to_asm/storage_store_load_intrinsics.ir index 3330cc38f6b..ce6b2dd2793 100644 --- a/sway-core/tests/ir_to_asm/storage_store_load_intrinsics.ir +++ b/sway-core/tests/ir_to_asm/storage_store_load_intrinsics.ir @@ -244,55 +244,6 @@ contract { ret u64 v104 } - fn sha256_0(param !106: u64) -> b256, !7 { - local mut ptr b256 result_buffer - local ptr u64 size - - entry: - v0 = call min_1(), !8 - v1 = get_ptr mut ptr b256 result_buffer, ptr b256, 0, !13 - store v0, ptr v1, !13 - v2 = const bool false - v3 = call not_2(v2), !11 - cbr v3, block0, block1, !11 - - block0: - v4 = get_ptr mut ptr b256 result_buffer, ptr b256, 0, !20 - v5 = load ptr v4, !20 - v6 = const u64 8, !24 - v7 = asm(buffer, ptr: param, eight_bytes: v6, hash: v5) -> b256 hash, !25 { - move buffer sp, !27 - cfei i8, !28 - sw buffer ptr i0, !29 - s256 hash buffer eight_bytes, !30 - cfsi i8, !31 - } - br block2 - - block1: - v8 = get_ptr ptr u64 size, ptr u64, 0, !32 - v9 = const u64 8 - store v9, ptr v8, !32 - v10 = get_ptr mut ptr b256 result_buffer, ptr b256, 0, !35 - v11 = load ptr v10, !35 - v12 = get_ptr ptr u64 size, ptr u64, 0, !38 - v13 = load ptr v12, !38 - v14 = asm(hash: v11, ptr: param, bytes: v13) -> b256 hash, !41 { - s256 hash ptr bytes, !43 - } - br block2 - - block2: - v15 = phi(block0: v7, block1: v14) - ret b256 v15 - } - - fn min_1() -> b256, !107 { - entry: - v0 = const b256 0x0000000000000000000000000000000000000000000000000000000000000000, !5 - ret b256 v0 - } - fn not_2(a !108: bool) -> bool, !11 { entry: v0 = asm(r1: a, r2) -> bool r2, !16 { diff --git a/sway-core/tests/ir_to_asm/strings_in_storage.asm b/sway-core/tests/ir_to_asm/strings_in_storage.asm index ecca5868bc4..2a7d91d88ac 100644 --- a/sway-core/tests/ir_to_asm/strings_in_storage.asm +++ b/sway-core/tests/ir_to_asm/strings_in_storage.asm @@ -5,39 +5,40 @@ DATA_SECTION_OFFSET[0..32] DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is -lw $r1 $fp i73 ; load input function selector -lw $r0 data_3 ; load fn selector for comparison -eq $r0 $r1 $r0 ; function selector comparison -jnzi $r0 i14 ; jump to selected function -lw $r0 data_4 ; load fn selector for comparison -eq $r0 $r1 $r0 ; function selector comparison -jnzi $r0 i39 ; jump to selected function -rvrt $zero ; revert if no selectors matched -move $r3 $sp ; save locals base register -cfei i96 ; allocate 96 bytes for all locals -lw $r2 $fp i74 ; Base register for method parameter -addi $r0 $r3 i0 ; get offset reg for get_ptr +lw $r0 $fp i73 ; load input function selector +lw $r1 data_3 ; load fn selector for comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i15 ; jump to selected function +lw $r1 data_4 ; load fn selector for 
comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i40 ; jump to selected function +movi $$tmp i123 ; special code for mismatched selector +rvrt $$tmp ; revert if no selectors matched +lw $r3 $fp i74 ; base register for method parameter +move $r2 $sp ; save locals base register +cfei i96 ; allocate 96 bytes for locals +addi $r0 $r2 i0 ; get offset reg for get_ptr lw $r1 data_0 ; literal instantiation -addi $r0 $r3 i0 ; get store offset +addi $r0 $r2 i0 ; get store offset mcpi $r0 $r1 i32 ; store value -addi $r0 $r3 i32 ; get offset reg for get_ptr -addi $r0 $r3 i32 ; get store offset -mcpi $r0 $r2 i64 ; store value -addi $r0 $r3 i32 ; get offset reg for get_ptr -addi $r1 $r3 i32 ; get offset -addi $r0 $r3 i0 ; get offset -swwq $r0 $r1 ; quad word state access -addi $r0 $r3 i0 ; get offset reg for get_ptr +addi $r0 $r2 i32 ; get offset reg for get_ptr +addi $r0 $r2 i32 ; get store offset +mcpi $r0 $r3 i64 ; store value +addi $r0 $r2 i32 ; get offset reg for get_ptr +addi $r0 $r2 i32 ; get offset +addi $r1 $r2 i0 ; get offset +swwq $r1 $r0 ; quad word state access +addi $r0 $r2 i0 ; get offset reg for get_ptr lw $r1 data_1 ; literal instantiation -addi $r0 $r3 i0 ; get store offset +addi $r0 $r2 i0 ; get store offset mcpi $r0 $r1 i32 ; store value -addi $r0 $r3 i64 ; get offset reg for get_ptr -addi $r1 $r3 i64 ; get offset -addi $r0 $r3 i0 ; get offset +addi $r0 $r2 i64 ; get offset reg for get_ptr +addi $r1 $r2 i64 ; get offset +addi $r0 $r2 i0 ; get offset swwq $r0 $r1 ; quad word state access ret $zero ; returning unit as zero move $r3 $sp ; save locals base register -cfei i96 ; allocate 96 bytes for all locals +cfei i96 ; allocate 96 bytes for locals addi $r0 $r3 i0 ; get offset reg for get_ptr lw $r1 data_0 ; literal instantiation addi $r0 $r3 i0 ; get store offset @@ -57,10 +58,9 @@ addi $r0 $r3 i0 ; get offset srwq $r1 $r0 ; quad word state access lw $r0 data_2 ; loading size for RETD retd $r2 $r0 -noop ; word-alignment of data section .data: -data_0 .b256 0xf383b0ce51358be57daa3b725fe44acdb2d880604e367199080b4379c41bb6ed -data_1 .b256 0xf383b0ce51358be57daa3b725fe44acdb2d880604e367199080b4379c41bb6ee -data_2 .u64 0x28 -data_3 .u32 0xe63a9733 -data_4 .u32 0xb8c27db9 +data_0 .bytes[32] f3 83 b0 ce 51 35 8b e5 7d aa 3b 72 5f e4 4a cd b2 d8 80 60 4e 36 71 99 08 0b 43 79 c4 1b b6 ed ....Q5..}.;r_.J....`N6q...Cy.... +data_1 .bytes[32] f3 83 b0 ce 51 35 8b e5 7d aa 3b 72 5f e4 4a cd b2 d8 80 60 4e 36 71 99 08 0b 43 79 c4 1b b6 ee ....Q5..}.;r_.J....`N6q...Cy.... 
+data_2 .word 40 +data_3 .word 3862599475 +data_4 .word 3099753913 diff --git a/sway-core/tests/ir_to_asm/struct_in_union.asm b/sway-core/tests/ir_to_asm/struct_in_union.asm index c6e92d8b1e1..b0496f93a5d 100644 --- a/sway-core/tests/ir_to_asm/struct_in_union.asm +++ b/sway-core/tests/ir_to_asm/struct_in_union.asm @@ -6,30 +6,24 @@ DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is move $r3 $sp ; save locals base register -cfei i8 ; allocate 8 bytes for all locals -move $r0 $sp ; save register for temporary stack value -cfei i8 ; allocate 8 bytes for temporary struct -lw $r1 data_0 ; literal instantiation for aggregate field -sw $r0 $r1 i0 ; initialise aggregate field -move $r2 $sp ; save register for temporary stack value -cfei i16 ; allocate 16 bytes for temporary struct -lw $r1 data_1 ; literal instantiation for aggregate field -sw $r2 $r1 i0 ; initialise aggregate field +cfei i8 ; allocate 8 bytes for locals +lw $r0 data_0 ; literal instantiation +lw $r2 data_1 ; literal instantiation addi $r1 $r2 i8 ; get struct field(s) 1 offset mcpi $r1 $r0 i8 ; store struct field value lw $r0 $r2 i0 ; extract_value @ 0 eq $r0 $r0 $one -jnzi $r0 i22 -ji i29 -addi $r0 $r2 i8 ; extract address -addi $r1 $r3 i0 ; get offset reg for get_ptr -addi $r1 $r3 i0 ; get store offset -mcpi $r1 $r0 i8 ; store value +jnzi $r0 i18 +ji i25 +addi $r1 $r2 i8 ; extract address +addi $r0 $r3 i0 ; get offset reg for get_ptr +addi $r0 $r3 i0 ; get store offset +mcpi $r0 $r1 i8 ; store value addi $r0 $r3 i0 ; get offset reg for get_ptr lw $r0 $r0 i0 ; extract_value @ 0 -ji i30 +ji i26 move $r0 $zero ; branch to phi value ret $r0 .data: -data_0 .u64 0x2a -data_1 .u64 0x01 +data_0 .collection { .word 42 } +data_1 .collection { .word 1, .word 0 } diff --git a/sway-core/tests/ir_to_asm/takes_string_returns_string.asm b/sway-core/tests/ir_to_asm/takes_string_returns_string.asm index 7b6445cde31..fa5a42b401e 100644 --- a/sway-core/tests/ir_to_asm/takes_string_returns_string.asm +++ b/sway-core/tests/ir_to_asm/takes_string_returns_string.asm @@ -5,23 +5,23 @@ DATA_SECTION_OFFSET[0..32] DATA_SECTION_OFFSET[32..64] lw $ds $is 1 add $$ds $$ds $is -lw $r1 $fp i73 ; load input function selector -lw $r0 data_2 ; load fn selector for comparison -eq $r0 $r1 $r0 ; function selector comparison -jnzi $r0 i14 ; jump to selected function -lw $r0 data_3 ; load fn selector for comparison -eq $r0 $r1 $r0 ; function selector comparison -jnzi $r0 i17 ; jump to selected function -rvrt $zero ; revert if no selectors matched -lw $r1 $fp i74 ; Base register for method parameter +lw $r0 $fp i73 ; load input function selector +lw $r1 data_2 ; load fn selector for comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i15 ; jump to selected function +lw $r1 data_3 ; load fn selector for comparison +eq $r2 $r0 $r1 ; function selector comparison +jnzi $r2 i18 ; jump to selected function +movi $$tmp i123 ; special code for mismatched selector +rvrt $$tmp ; revert if no selectors matched +lw $r1 $fp i74 ; base register for method parameter lw $r0 data_0 ; loading size for RETD retd $r1 $r0 -lw $r1 $fp i74 ; Base register for method parameter +lw $r1 $fp i74 ; base register for method parameter lw $r0 data_1 ; loading size for RETD retd $r1 $r0 -noop ; word-alignment of data section .data: -data_0 .u64 0x08 -data_1 .u64 0x10 -data_2 .u32 0x80da70e2 -data_3 .u32 0x28c0f699 +data_0 .word 8 +data_1 .word 16 +data_2 .word 2161799394 +data_3 .word 683734681 diff --git a/sway-ir/src/asm.rs b/sway-ir/src/asm.rs index f175122732f..ddb2d8b2093 
100644
--- a/sway-ir/src/asm.rs
+++ b/sway-ir/src/asm.rs
@@ -72,6 +72,11 @@ impl AsmBlock {
         context.asm_blocks[self.0].return_type
     }
 
+    /// Get a reference to the [`AsmBlockContent`] for this ASM block.
+    pub fn get_content<'a>(&self, context: &'a Context) -> &'a AsmBlockContent {
+        &context.asm_blocks[self.0]
+    }
+
     pub fn is_diverging(&self, context: &Context) -> bool {
         let content = &context.asm_blocks[self.0];
         content
diff --git a/sway-ir/src/bin/opt.rs b/sway-ir/src/bin/opt.rs
index 139357e29cd..619f194bcb7 100644
--- a/sway-ir/src/bin/opt.rs
+++ b/sway-ir/src/bin/opt.rs
@@ -77,10 +77,13 @@ trait NamedPass {
         ir: &mut Context,
         mut run_on_fn: F,
     ) -> Result<bool, IrError> {
-        let funcs = ir.functions.iter().map(|(idx, _)| idx).collect::<Vec<_>>();
+        let funcs = ir
+            .module_iter()
+            .flat_map(|module| module.function_iter(ir))
+            .collect::<Vec<_>>();
         let mut modified = false;
-        for idx in funcs {
-            if run_on_fn(ir, &Function(idx))? {
+        for func in funcs {
+            if run_on_fn(ir, &func)? {
                 modified = true;
             }
         }
@@ -136,11 +139,11 @@ impl NamedPass for InlinePass {
     fn run(ir: &mut Context) -> Result<bool, IrError> {
         // For now we inline everything into `main()`. Eventually we can be more selective.
         let main_fn = ir
-            .functions
-            .iter()
-            .find_map(|(idx, fc)| if fc.name == "main" { Some(idx) } else { None })
+            .module_iter()
+            .flat_map(|module| module.function_iter(ir))
+            .find(|f| f.get_name(ir) == "main")
            .unwrap();
-        optimize::inline_all_function_calls(ir, &Function(main_fn))
+        optimize::inline_all_function_calls(ir, &main_fn)
     }
 }
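The traversal pattern introduced above -- walking `module_iter()` and flat-mapping `function_iter()` rather than indexing the (now private) function arena -- recurs throughout this diff. A minimal sketch of the idiom, assuming only the iterator APIs visible in this change (the helper name `all_functions` is hypothetical):

    use sway_ir::{Context, Function};

    // Collect every function in every module up front; a pass can then
    // mutate the context freely without holding a live iterator over it.
    fn all_functions(ir: &Context) -> Vec<Function> {
        ir.module_iter()
            .flat_map(|module| module.function_iter(ir))
            .collect()
    }

Collecting into a `Vec` first is deliberate: `run_on_fn` takes `&mut Context`, so the iterator cannot stay borrowed across the mutation.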
diff --git a/sway-ir/src/constant.rs b/sway-ir/src/constant.rs
index 4e2f42bed6f..166d938f802 100644
--- a/sway-ir/src/constant.rs
+++ b/sway-ir/src/constant.rs
@@ -99,10 +99,6 @@ impl Constant {
         }
     }
 
-    pub fn get_undef(context: &mut Context, ty: Type) -> Value {
-        Value::new_constant(context, Constant::new_undef(context, ty))
-    }
-
     pub fn get_unit(context: &mut Context) -> Value {
         Value::new_constant(context, Constant::new_unit())
     }
diff --git a/sway-ir/src/context.rs b/sway-ir/src/context.rs
index d66229f3654..40fb521db03 100644
--- a/sway-ir/src/context.rs
+++ b/sway-ir/src/context.rs
@@ -20,14 +20,14 @@ use crate::{
 /// managed by the context.
 #[derive(Default)]
 pub struct Context {
-    pub modules: Arena<ModuleContent>,
-    pub functions: Arena<FunctionContent>,
-    pub blocks: Arena<BlockContent>,
-    pub values: Arena<ValueContent>,
-    pub pointers: Arena<PointerContent>,
-    pub aggregates: Arena<AggregateContent>,
-    pub asm_blocks: Arena<AsmBlockContent>,
-    pub metadata: Arena<Metadatum>,
+    pub(crate) modules: Arena<ModuleContent>,
+    pub(crate) functions: Arena<FunctionContent>,
+    pub(crate) blocks: Arena<BlockContent>,
+    pub(crate) values: Arena<ValueContent>,
+    pub(crate) pointers: Arena<PointerContent>,
+    pub(crate) aggregates: Arena<AggregateContent>,
+    pub(crate) asm_blocks: Arena<AsmBlockContent>,
+    pub(crate) metadata: Arena<Metadatum>,
     next_unique_sym_tag: u64,
 }
diff --git a/sway-ir/src/function.rs b/sway-ir/src/function.rs
index f78290452b0..b034b6cca44 100644
--- a/sway-ir/src/function.rs
+++ b/sway-ir/src/function.rs
@@ -242,6 +242,11 @@ impl Function {
         context.functions[self.0].blocks[0]
     }
 
+    /// Return the attached metadata.
+    pub fn get_metadata(&self, context: &Context) -> Option<MetadataIndex> {
+        context.functions[self.0].metadata
+    }
+
     /// Whether this function has a valid selector.
     pub fn has_selector(&self, context: &Context) -> bool {
         context.functions[self.0].selector.is_some()
@@ -252,6 +257,11 @@
         context.functions[self.0].selector
     }
 
+    /// Get the function return type.
+    pub fn get_return_type(&self, context: &Context) -> Type {
+        context.functions[self.0].return_type
+    }
+
     /// Get an arg value by name, if found.
     pub fn get_arg(&self, context: &Context, name: &str) -> Option<Value> {
         context.functions[self.0]
diff --git a/sway-ir/src/irtype.rs b/sway-ir/src/irtype.rs
index f9433f0fdc7..43e9f4c02ee 100644
--- a/sway-ir/src/irtype.rs
+++ b/sway-ir/src/irtype.rs
@@ -165,6 +165,11 @@ impl Aggregate {
         context.aggregates[self.0].eq(context, &context.aggregates[other.0])
     }
 
+    /// Get a reference to the [`AggregateContent`] for this aggregate.
+    pub fn get_content<'a>(&self, context: &'a Context) -> &'a AggregateContent {
+        &context.aggregates[self.0]
+    }
+
     /// Get the type of (nested) aggregate fields, if found. If an index is into a `Union` then it
     /// will get the type of the indexed variant.
     pub fn get_field_type(&self, context: &Context, indices: &[u64]) -> Option<Type> {
diff --git a/sway-ir/src/metadata.rs b/sway-ir/src/metadata.rs
index a88d572f2dc..54adfe3a585 100644
--- a/sway-ir/src/metadata.rs
+++ b/sway-ir/src/metadata.rs
@@ -6,6 +6,7 @@
 ///!
 ///! The metadata themselves are opaque to `sway-ir` and are represented with simple value types;
 ///! integers, strings, symbols (tags) and lists.
+use crate::context::Context;
 
 #[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd, Hash)]
 pub struct MetadataIndex(pub generational_arena::Index);
@@ -28,7 +29,7 @@ pub enum Metadatum {
 /// This function conveniently has all the logic to return the simplest combination of two
 /// `Option`s.
 pub fn combine(
-    context: &mut crate::context::Context,
+    context: &mut Context,
     md_idx_a: &Option<MetadataIndex>,
     md_idx_b: &Option<MetadataIndex>,
 ) -> Option<MetadataIndex> {
@@ -57,6 +58,40 @@ pub fn combine(
     }
 }
 
+impl MetadataIndex {
+    pub fn new_integer(context: &mut Context, int: u64) -> Self {
+        MetadataIndex(context.metadata.insert(Metadatum::Integer(int)))
+    }
+
+    pub fn new_index(context: &mut Context, idx: MetadataIndex) -> Self {
+        MetadataIndex(context.metadata.insert(Metadatum::Index(idx)))
+    }
+
+    pub fn new_string<S: Into<String>>(context: &mut Context, s: S) -> Self {
+        MetadataIndex(context.metadata.insert(Metadatum::String(s.into())))
+    }
+
+    pub fn new_struct<S: Into<String>>(
+        context: &mut Context,
+        tag: S,
+        fields: Vec<MetadataIndex>,
+    ) -> Self {
+        MetadataIndex(
+            context
+                .metadata
+                .insert(Metadatum::Struct(tag.into(), fields)),
+        )
+    }
+
+    pub fn new_list(context: &mut Context, els: Vec<MetadataIndex>) -> Self {
+        MetadataIndex(context.metadata.insert(Metadatum::List(els)))
+    }
+
+    pub fn get_content<'a>(&self, context: &'a Context) -> &'a Metadatum {
+        &context.metadata[self.0]
+    }
+}
+
 impl Metadatum {
     pub fn unwrap_integer(&self) -> Option<u64> {
         if let Metadatum::Integer(n) = self {
@@ -88,4 +123,12 @@ impl Metadatum {
             _otherwise => None,
         }
     }
+
+    pub fn unwrap_list(&self) -> Option<&[MetadataIndex]> {
+        if let Metadatum::List(els) = self {
+            Some(els)
+        } else {
+            None
+        }
+    }
 }
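The new `MetadataIndex` constructors compose naturally into nested metadata. A small sketch of building a metadatum, assuming the usual flat re-exports from the crate root; the "span" tag and its field layout are purely illustrative, not something this diff defines:

    use sway_ir::{Context, MetadataIndex};

    fn build_span_metadata(ctx: &mut Context) -> MetadataIndex {
        // A hypothetical "span" metadatum: a path string plus two offsets.
        let path = MetadataIndex::new_string(ctx, "src/main.sw");
        let start = MetadataIndex::new_integer(ctx, 12);
        let end = MetadataIndex::new_integer(ctx, 34);
        MetadataIndex::new_struct(ctx, "span", vec![path, start, end])
    }

Reading back goes through `get_content` and the `unwrap_*` helpers shown above (`unwrap_integer`, `unwrap_list`), each of which returns `None` on a kind mismatch rather than panicking.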
diff --git a/sway-ir/src/module.rs b/sway-ir/src/module.rs
index b5cefe096be..1a5b1e47979 100644
--- a/sway-ir/src/module.rs
+++ b/sway-ir/src/module.rs
@@ -23,7 +23,7 @@ pub struct ModuleContent {
 }
 
 /// The different 'kinds' of Sway module: `Contract`, `Library`, `Predicate` or `Script`.
-#[derive(Clone, Copy, Debug)]
+#[derive(Clone, Copy, Debug, Eq, PartialEq)]
 pub enum Kind {
     Contract,
     Library,
@@ -61,6 +61,18 @@ impl Module {
     pub fn get_global_constant(&self, context: &Context, name: &str) -> Option<Value> {
         context.modules[self.0].globals.get(name).copied()
     }
+
+    /// Remove a function from the module.
+    ///
+    /// **Use with care! Be sure the function is not an entry point nor called at any stage.**
+    pub fn remove_function(&self, context: &mut Context, function: &Function) {
+        context
+            .modules
+            .get_mut(self.0)
+            .expect("Module must exist in context.")
+            .functions
+            .retain(|mod_fn| mod_fn != function);
+    }
 }
 
 /// An iterator over [`Module`]s within a [`Context`].
diff --git a/sway-ir/src/optimize/dce.rs b/sway-ir/src/optimize/dce.rs
index ddcb5bf7304..f376e1a8bf3 100644
--- a/sway-ir/src/optimize/dce.rs
+++ b/sway-ir/src/optimize/dce.rs
@@ -5,12 +5,9 @@
 //! 2. At the time of inspecting a definition, if it has no uses, it is removed.
 //! This pass does not do CFG transformations. That is handled by simplify_cfg.
 
-use crate::{
-    context::Context, error::IrError, function::Function, instruction::Instruction,
-    value::ValueDatum, Block, Value,
-};
+use crate::{Block, Context, Function, Instruction, IrError, Module, Value, ValueDatum};
 
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
 
 fn can_eliminate_instruction(context: &Context, val: Value) -> bool {
     let inst = val.get_instruction(context).unwrap();
@@ -158,3 +155,54 @@ pub fn dce(context: &mut Context, function: &Function) -> Result<bool, IrError>
     Ok(modified)
 }
+
+/// Remove entire functions from a module based on whether they are called or not, using a list of
+/// root 'entry' functions to perform a search.
+///
+/// Functions which are `pub` will not be removed and only functions within the passed [`Module`]
+/// are considered for removal.
+pub fn func_dce(context: &mut Context, module: &Module, entry_fns: &[Function]) -> bool {
+    // Recursively find all the functions called by an entry function.
+    fn grow_called_function_set(
+        context: &Context,
+        caller: Function,
+        called_set: &mut HashSet<Function>,
+    ) {
+        if called_set.insert(caller) {
+            // We haven't seen caller before. Iterate for all that it calls.
+            for func in caller
+                .instruction_iter(context)
+                .filter_map(|(_block, ins_value)| {
+                    ins_value
+                        .get_instruction(context)
+                        .and_then(|ins| match ins {
+                            Instruction::Call(f, _args) => Some(f),
+                            _otherwise => None,
+                        })
+                })
+            {
+                grow_called_function_set(context, *func, called_set);
+            }
+        }
+    }
+
+    // Gather our entry functions together into a set.
+    let mut called_fns: HashSet<Function> = HashSet::new();
+    for entry_fn in entry_fns {
+        grow_called_function_set(context, *entry_fn, &mut called_fns);
+    }
+
+    // Gather the functions in the module which aren't called. It's better to collect them
+    // separately first so as to avoid any issues with invalidating the function iterator.
+    let dead_fns = module
+        .function_iter(context)
+        .filter(|f| !called_fns.contains(f))
+        .collect::<Vec<_>>();
+
+    let modified = !dead_fns.is_empty();
+    for dead_fn in dead_fns {
+        module.remove_function(context, &dead_fn);
+    }
+
+    modified
+}
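A sketch of how `func_dce` might be driven from above, under the assumption that the entry points of interest are `main` plus anything carrying an ABI selector (both `get_name` and `has_selector` appear earlier in this diff); the `prune_dead_functions` wrapper itself is hypothetical:

    use sway_ir::{optimize, Context, Function};

    fn prune_dead_functions(ir: &mut Context) -> bool {
        let mut modified = false;
        // Collect the modules first so `ir` is free to be mutated below.
        let modules: Vec<_> = ir.module_iter().collect();
        for module in modules {
            // Entry points: `main` or anything with an ABI selector.
            let entry_fns: Vec<Function> = module
                .function_iter(ir)
                .filter(|f| f.get_name(ir) == "main" || f.has_selector(ir))
                .collect();
            modified |= optimize::func_dce(ir, &module, &entry_fns);
        }
        modified
    }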
diff --git a/sway-ir/src/pointer.rs b/sway-ir/src/pointer.rs
index 188a6aea7a5..68ce22b510c 100644
--- a/sway-ir/src/pointer.rs
+++ b/sway-ir/src/pointer.rs
@@ -49,6 +49,16 @@ impl Pointer {
         &context.pointers[self.0].ty
     }
 
+    /// Return the initializer for this pointer.
+    pub fn get_initializer<'a>(&self, context: &'a Context) -> Option<&'a Constant> {
+        context.pointers[self.0].initializer.as_ref()
+    }
+
+    /// Return whether the pointer is to a mutable value.
+    pub fn is_mutable(&self, context: &Context) -> bool {
+        context.pointers[self.0].is_mutable
+    }
+
     /// Return whether this pointer is to a [`Type::Struct`] in particular.
     pub fn is_aggregate_ptr(&self, context: &Context) -> bool {
         matches!(
diff --git a/sway-ir/src/value.rs b/sway-ir/src/value.rs
index a3d4632a57f..884dfccae33 100644
--- a/sway-ir/src/value.rs
+++ b/sway-ir/src/value.rs
@@ -134,25 +134,27 @@ impl Value {
         context.values[self.0].value = other;
     }
 
-    pub fn get_instruction_mut<'a>(&self, context: &'a mut Context) -> Option<&'a mut Instruction> {
-        if let ValueDatum::Instruction(instruction) =
-            &mut context.values.get_mut(self.0).unwrap().value
-        {
+    /// Get a reference to this value as an instruction, iff it is one.
+    pub fn get_instruction<'a>(&self, context: &'a Context) -> Option<&'a Instruction> {
+        if let ValueDatum::Instruction(instruction) = &context.values.get(self.0).unwrap().value {
             Some(instruction)
         } else {
             None
         }
     }
 
-    pub fn get_instruction<'a>(&self, context: &'a Context) -> Option<&'a Instruction> {
-        if let ValueDatum::Instruction(instruction) = &context.values.get(self.0).unwrap().value {
+    /// Get a mutable reference to this value as an instruction, iff it is one.
+    pub fn get_instruction_mut<'a>(&self, context: &'a mut Context) -> Option<&'a mut Instruction> {
+        if let ValueDatum::Instruction(instruction) =
+            &mut context.values.get_mut(self.0).unwrap().value
+        {
             Some(instruction)
         } else {
             None
         }
     }
 
-    /// Get reference to the Constant inside this value, if it's one.
+    /// Get a reference to this value as a constant, iff it is one.
     pub fn get_constant<'a>(&self, context: &'a Context) -> Option<&'a Constant> {
         if let ValueDatum::Constant(cn) = &context.values.get(self.0).unwrap().value {
             Some(cn)
@@ -161,6 +163,15 @@
         }
     }
 
+    /// Iff this value is an argument, return its type.
+    pub fn get_argument_type(&self, context: &Context) -> Option<Type> {
+        if let ValueDatum::Argument(ty) = &context.values.get(self.0).unwrap().value {
+            Some(*ty)
+        } else {
+            None
+        }
+    }
+
     /// Get the type for this value, if found.
     ///
     /// Arguments and constants always have a type, but only some instructions do.
diff --git a/sway-ir/src/verify.rs b/sway-ir/src/verify.rs
index 1e01697e8c2..0aaa286003b 100644
--- a/sway-ir/src/verify.rs
+++ b/sway-ir/src/verify.rs
@@ -408,7 +408,7 @@ impl<'a> InstructionVerifier<'a> {
             // user args.
             // - The coins and gas must be u64s.
             // - The asset_id must be a B256
-            if let Some(Type::Struct(agg)) = params.get_type(self.context) {
+            if let Some(Type::Struct(agg)) = params.get_stripped_ptr_type(self.context) {
                 let fields = self.context.aggregates[agg.0].field_types();
                 if fields.len() != 3
                     || !fields[0].eq(self.context, &Type::B256)
@@ -521,7 +521,7 @@ impl<'a> InstructionVerifier<'a> {
             Err(IrError::VerifyAccessElementInconsistentTypes)
         } else if self.opt_ty_not_eq(
             &ty.get_elem_type(self.context),
-            &value.get_type(self.context),
+            &value.get_stripped_ptr_type(self.context),
         ) {
             Err(IrError::VerifyInsertElementOfIncorrectType)
         } else if !matches!(index_val.get_type(self.context), Some(Type::Uint(_))) {
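The reordered `Value` accessors above all share one shape: pattern-match the `ValueDatum` and return `Some` only for the matching kind. Consuming them stays equally terse; for instance, counting the direct calls in a function (an illustrative helper, not part of this diff):

    use sway_ir::{Context, Function, Instruction};

    fn count_calls(ctx: &Context, function: &Function) -> usize {
        function
            .instruction_iter(ctx)
            .filter(|(_block, ins_value)| {
                matches!(ins_value.get_instruction(ctx), Some(Instruction::Call(..)))
            })
            .count()
    }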
diff --git a/sway-ir/tests/tests.rs b/sway-ir/tests/tests.rs
index 79270b4b5ed..ae6f2aef2a3 100644
--- a/sway-ir/tests/tests.rs
+++ b/sway-ir/tests/tests.rs
@@ -1,6 +1,6 @@
 use std::path::PathBuf;
 
-use sway_ir::{optimize as opt, Context, Function};
+use sway_ir::{optimize as opt, Context};
 
 // -------------------------------------------------------------------------------------------------
 // Utility for finding test files and running FileCheck. See actual pass invocations below.
@@ -69,12 +69,15 @@ fn inline() {
             words
         };
 
-        let fn_idcs: Vec<_> = ir.functions.iter().map(|func| func.0).collect();
+        let funcs = ir
+            .module_iter()
+            .flat_map(|module| module.function_iter(ir))
+            .collect::<Vec<_>>();
         if params.iter().any(|&p| p == "all") {
             // Just inline everything, replacing all CALL instructions.
-            fn_idcs.into_iter().fold(false, |acc, fn_idx| {
-                opt::inline_all_function_calls(ir, &Function(fn_idx)).unwrap() || acc
+            funcs.into_iter().fold(false, |acc, func| {
+                opt::inline_all_function_calls(ir, &func).unwrap() || acc
             })
         } else {
             // Get the parameters from the first line. See the inline/README.md for details. If
@@ -93,10 +96,10 @@ fn inline() {
                 },
             );
 
-            fn_idcs.into_iter().fold(false, |acc, fn_idx| {
+            funcs.into_iter().fold(false, |acc, func| {
                 opt::inline_some_function_calls(
                     ir,
-                    &Function(fn_idx),
+                    &func,
                     opt::is_small_fn(max_blocks, max_instrs, max_stack),
                 )
                 .unwrap()
@@ -114,10 +117,12 @@
 #[test]
 fn constants() {
     run_tests("constants", |_first_line, ir: &mut Context| {
-        let fn_idcs: Vec<_> = ir.functions.iter().map(|func| func.0).collect();
-        fn_idcs.into_iter().fold(false, |acc, fn_idx| {
-            sway_ir::optimize::combine_constants(ir, &sway_ir::function::Function(fn_idx)).unwrap()
-                || acc
+        let funcs: Vec<_> = ir
+            .module_iter()
+            .flat_map(|module| module.function_iter(ir))
+            .collect();
+        funcs.into_iter().fold(false, |acc, func| {
+            sway_ir::optimize::combine_constants(ir, &func).unwrap() || acc
         })
     })
 }
@@ -128,10 +133,12 @@ fn constants() {
     run_tests("simplify_cfg", |_first_line, ir: &mut Context| {
-        let fn_idcs: Vec<_> = ir.functions.iter().map(|func| func.0).collect();
-        fn_idcs.into_iter().fold(false, |acc, fn_idx| {
-            sway_ir::optimize::simplify_cfg(ir, &sway_ir::function::Function(fn_idx)).unwrap()
-                || acc
+        let funcs: Vec<_> = ir
+            .module_iter()
+            .flat_map(|module| module.function_iter(ir))
+            .collect();
+        funcs.into_iter().fold(false, |acc, func| {
+            sway_ir::optimize::simplify_cfg(ir, &func).unwrap() || acc
         })
     })
 }
@@ -142,9 +149,12 @@ fn dce() {
     run_tests("dce", |_first_line, ir: &mut Context| {
-        let fn_idcs: Vec<_> = ir.functions.iter().map(|func| func.0).collect();
-        fn_idcs.into_iter().fold(false, |acc, fn_idx| {
-            sway_ir::optimize::dce(ir, &sway_ir::function::Function(fn_idx)).unwrap() || acc
+        let funcs: Vec<_> = ir
+            .module_iter()
+            .flat_map(|module| module.function_iter(ir))
+            .collect();
+        funcs.into_iter().fold(false, |acc, func| {
+            sway_ir::optimize::dce(ir, &func).unwrap() || acc
         })
     })
 }
diff --git a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/array_of_structs_caller/src/main.sw b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/array_of_structs_caller/src/main.sw
index 67400907aba..168b77f1cae 100644
--- a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/array_of_structs_caller/src/main.sw
+++ b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/array_of_structs_caller/src/main.sw
@@ -3,16 +3,16 @@ use array_of_structs_abi::{Id, TestContract, Wrapper};
 use std::{assert::assert, hash::sha256};
 
 fn main() -> u64 {
-    let addr = abi(TestContract, 0x4c963e7006e41055f19915d5ac64bd68b971e05efee75ba85a23862b80837d7a);
+    let addr = abi(TestContract, 0xda0aa9fce0e09044c49954b15d9243bedf40c97e8d64be5c36de4b2d25b1a43a);
 
     let input = [Wrapper {
         id: Id {
-            number: 42,
+            number: 42,
         },
     },
     Wrapper {
         id: Id {
-            number: 66,
+            number: 66,
         },
     },
     ];
diff --git 
a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/bal_opcode/src/main.sw b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/bal_opcode/src/main.sw index 24253f6a067..fdd801ba931 100644 --- a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/bal_opcode/src/main.sw +++ b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/bal_opcode/src/main.sw @@ -5,7 +5,7 @@ use balance_test_abi::BalanceTest; fn main() -> bool { // @todo switch to using ContractId when abi signature changes. - let balance_test_contract_id = 0x597e5ddb1a6bec92a96a73e4f0bc6f6e3e7b21f5e03e1c812cd63cffac480463; + let balance_test_contract_id = 0x8dad20e27b24939770b4c58237e7b4ffa1dd11c2d2788feb30914eb57ad85c4f; let balance_test_contract = abi(BalanceTest, balance_test_contract_id); let number = balance_test_contract.get_42 { diff --git a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/call_abi_with_tuples/src/main.sw b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/call_abi_with_tuples/src/main.sw index 7cf8763b3bd..a6192932e4b 100644 --- a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/call_abi_with_tuples/src/main.sw +++ b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/call_abi_with_tuples/src/main.sw @@ -4,7 +4,7 @@ use abi_with_tuples::*; use std::assert::assert; fn main() -> bool { - let the_abi = abi(MyContract, 0x32a5e5b389bda4bbf8edad1cdb3abe8e1e004bc947ebc6212e307ae7809b554f); + let the_abi = abi(MyContract, 0xf403dd5c8c89bf7202bf25d48a381f9d1755b32cd128c3053ef60435a4999bd7); let param1 = ( Person { diff --git a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/call_basic_storage/src/main.sw b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/call_basic_storage/src/main.sw index df1c22ede94..a7e7707a606 100644 --- a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/call_basic_storage/src/main.sw +++ b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/call_basic_storage/src/main.sw @@ -3,7 +3,7 @@ use basic_storage_abi::{StoreU64, Quad}; use std::assert::assert; fn main() -> u64 { - let addr = abi(StoreU64, 0xd992b1febc69f6102915ea1102fde7b88f3e484ec55a02b4539c91615dca84e7); + let addr = abi(StoreU64, 0x06ee714ac67bc939377e34466b2461daaa0914eaf3f1390e54ede488a63114b8); let key = 0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff; let value = 4242; diff --git a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/call_increment_contract/src/main.sw b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/call_increment_contract/src/main.sw index ad944cda49c..61b5283a72e 100644 --- a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/call_increment_contract/src/main.sw +++ b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/call_increment_contract/src/main.sw @@ -4,7 +4,7 @@ use increment_abi::Incrementor; use std::assert::assert; fn main() -> bool { - let the_abi = abi(Incrementor,0x2fa39fe61cb782b27091ad143f1d7a6e5eddd22d044cf1ab754dcc1218e182a9 ); + let the_abi = abi(Incrementor, 0x5b20234e200cd396540e7fd184c28cc266b1a4ed545ee2aba47ec76345ef1071); the_abi.increment(5); the_abi.increment(5); let result = the_abi.get(); diff --git 
a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/caller_auth_test/src/main.sw b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/caller_auth_test/src/main.sw index 92960954832..862227dca04 100644 --- a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/caller_auth_test/src/main.sw +++ b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/caller_auth_test/src/main.sw @@ -4,7 +4,7 @@ use std::assert::assert; // should be false in the case of a script fn main() -> bool { - let caller = abi(AuthTesting, 0x6868c510e230173e1f788fd7bdba127ffb92b7408d0e7fface1a32d03c004361); + let caller = abi(AuthTesting, 0xbd5727c9cdd8ae457f94a99cbba11966b50374f3d12c2f4649dd63fdb674361a); let result = caller.returns_gm_one(); assert(result); result diff --git a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/caller_context_test/src/main.sw b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/caller_context_test/src/main.sw index a96459ed55c..13b1f90e08d 100644 --- a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/caller_context_test/src/main.sw +++ b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/caller_context_test/src/main.sw @@ -7,7 +7,7 @@ fn main() -> bool { let zero = ~b256::min(); let gas: u64 = 1000; let amount: u64 = 11; - let other_contract_id = ~ContractId::from(0x18bf8e0d8f9ae71fe6448e18785f5aef719f40055e45672fa0e9e906f13eb289); + let other_contract_id = ~ContractId::from(0x84de7dc313a1f48a59148c8ff50c2ed59514d5ea21874ad0914c246420b5ecd9); let base_asset_id = BASE_ASSET_ID; let test_contract = abi(ContextTesting, other_contract_id.into()); diff --git a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/get_storage_key_caller/src/main.sw b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/get_storage_key_caller/src/main.sw index ee204c4f222..8e23ea05a0f 100644 --- a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/get_storage_key_caller/src/main.sw +++ b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/get_storage_key_caller/src/main.sw @@ -3,7 +3,7 @@ use get_storage_key_abi::TestContract; use std::assert::assert; fn main() -> u64 { - let caller = abi(TestContract, 0xde1fc5ef591759ed30d7ec21256339b11937d3b8d84229ca12915cc3637acdf9); + let caller = abi(TestContract, 0x18bd4f3ed949119c14c56a60367e1c06ee9afa88eb3a51f41a809b23b2dd8064); // Get the storage keys directly by calling the contract methods from_f1, // from_f2, from_f3, from_f4. 
The keys correspond to different entries in
diff --git a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/nested_struct_args_caller/src/main.sw b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/nested_struct_args_caller/src/main.sw
index e45d73edfe9..79b13d3ed89 100644
--- a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/nested_struct_args_caller/src/main.sw
+++ b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/nested_struct_args_caller/src/main.sw
@@ -4,7 +4,7 @@ use nested_struct_args_abi::*;
 use std::assert::assert;
 
 fn main() -> bool {
-    let contract_id = 0x774699460afc2ff7e2bd72bd3f26df1625a58ceaa91d90cbf3d70c8ab455ad3f;
+    let contract_id = 0x8f42239954e490e48bfbc22aade19f5169274fcbc4ffb2d6bfb7330954b73bd0;
     let caller = abi(NestedStructArgs, contract_id);
 
     let param_one = StructOne {
diff --git a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/storage_access_caller/src/main.sw b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/storage_access_caller/src/main.sw
index 1320cdaabec..6abd7c37cd4 100644
--- a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/storage_access_caller/src/main.sw
+++ b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/storage_access_caller/src/main.sw
@@ -3,7 +3,7 @@ use storage_access_abi::*;
 use std::{assert::assert, hash::sha256, revert::revert};
 
 fn main() -> bool {
-    let contract_id = 0x874b633b6d9a900580e920afa2cdb1400de23b7056324974eb6f1ed7b1e05ce1;
+    let contract_id = 0x371ef9abf02c7f6888b18ce7eec5ced3f17bd5c17356c233077fdda796a1b416;
     let caller = abi(StorageAccess, contract_id);
 
     // Test initializers
diff --git a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/token_ops_test/src/main.sw b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/token_ops_test/src/main.sw
index 63cee6486e5..2868698ac94 100644
--- a/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/token_ops_test/src/main.sw
+++ b/test/src/e2e_vm_tests/test_programs/should_pass/require_contract_deployment/token_ops_test/src/main.sw
@@ -17,10 +17,10 @@ fn main() -> bool {
     let default_gas = 1_000_000_000_000;
 
     // the deployed fuel_coin Contract_Id:
-    let fuelcoin_id = ~ContractId::from(0xae7ffe3b9300b99d43119c289c2ca56cda96afb0f7c0438c06a98f596313708c);
+    let fuelcoin_id = ~ContractId::from(0xf611f6575617a7419d5be613c7ba8ad6ac1756a8200de909bb8f2845a72a9c0f);
 
     // contract ID for sway/test/src/e2e_vm_tests/test_programs/should_pass/test_contracts/balance_test_contract/
-    let balance_test_id = ~ContractId::from(0x597e5ddb1a6bec92a96a73e4f0bc6f6e3e7b21f5e03e1c812cd63cffac480463);
+    let balance_test_id = ~ContractId::from(0x8dad20e27b24939770b4c58237e7b4ffa1dd11c2d2788feb30914eb57ad85c4f);
 
     // todo: use correct type ContractId
     let fuel_coin = abi(TestFuelCoin, fuelcoin_id.into());
diff --git a/test/src/ir_generation/mod.rs b/test/src/ir_generation/mod.rs
index c670b6f6507..f44e9a5b1f9 100644
--- a/test/src/ir_generation/mod.rs
+++ b/test/src/ir_generation/mod.rs
@@ -115,7 +115,7 @@ pub(super) fn run(filter_regex: Option<&regex::Regex>) {
         .expect("there were no errors, so there should be a program");
 
     // Compile to IR.
- let mut ir = compile_program(typed_program) + let ir = compile_program(typed_program) .unwrap_or_else(|e| { panic!("Failed to compile test {}:\n{e}", path.display()); }) @@ -147,30 +147,15 @@ pub(super) fn run(filter_regex: Option<®ex::Regex>) { }; if let Some(asm_checker) = opt_asm_checker { - // Compile to ASM. Need to inline function calls beforehand. - let entry_point_functions: Vec<::sway_ir::Function> = ir - .functions - .iter() - .filter_map(|(idx, fc)| { - if fc.name == "main" || fc.selector.is_some() { - Some(::sway_ir::function::Function(idx)) - } else { - None - } - }) - .collect(); - - for function in entry_point_functions { - assert!( - sway_ir::optimize::inline_all_function_calls(&mut ir, &function).is_ok() - ); - } - + // Compile to ASM. let asm_result = compile_ir_to_asm(&ir, None); - assert!( - asm_result.is_ok(), - "Errors when compiling {test_file_name} IR to ASM." - ); + if !asm_result.is_ok() { + println!("Errors when compiling {test_file_name} IR to ASM:\n"); + for e in asm_result.errors { + println!("{e}\n"); + } + panic!(); + }; let asm_output = asm_result .value diff --git a/test/src/ir_generation/tests/enum.sw b/test/src/ir_generation/tests/enum.sw index bbca535b372..f16d78f3903 100644 --- a/test/src/ir_generation/tests/enum.sw +++ b/test/src/ir_generation/tests/enum.sw @@ -18,7 +18,7 @@ fn eat(meal: Fruit) -> bool { // check: local ptr { u64, ( () | () | u64 ) } lunch -// check: $(enum_undef=$VAL) = const { u64, ( () | () | u64 ) } { u64 undef, ( () | () | u64 ) undef } +// check: $(enum_undef=$VAL) = get_ptr ptr { u64, ( () | () | u64 ) } $ID, ptr { u64, ( () | () | u64 ) }, 0 // check: $(one_val=$VAL) = const u64 1 // check: $(enum_tagged=$VAL) = insert_value $enum_undef, { u64, ( () | () | u64 ) }, $one_val, 0 // check: $(lunch_ptr=$VAL) = get_ptr ptr { u64, ( () | () | u64 ) } lunch, ptr { u64, ( () | () | u64 ) }, 0 @@ -27,7 +27,7 @@ fn eat(meal: Fruit) -> bool { // check: $(lunch_ptr=$VAL) = get_ptr ptr { u64, ( () | () | u64 ) } lunch, ptr { u64, ( () | () | u64 ) }, 0 // check: call $(eat_fn=$ID)($lunch_ptr) -// check: $(enum_undef=$VAL) = const { u64, ( () | () | u64 ) } { u64 undef, ( () | () | u64 ) undef } +// check: $(enum_undef=$VAL) = get_ptr ptr { u64, ( () | () | u64 ) } $ID, ptr { u64, ( () | () | u64 ) }, 0 // check: $(two_val=$VAL) = const u64 2 // check: $(enum_tagged=$VAL) = insert_value $enum_undef, { u64, ( () | () | u64 ) }, $two_val, 0 // check: $(three_val=$VAL) = const u64 3 diff --git a/test/src/ir_generation/tests/enum_enum.sw b/test/src/ir_generation/tests/enum_enum.sw index 836ed66dd0c..053093d1fee 100644 --- a/test/src/ir_generation/tests/enum_enum.sw +++ b/test/src/ir_generation/tests/enum_enum.sw @@ -18,10 +18,10 @@ fn main() { // ::check-ir:: -// check: $(outer_undef=$VAL) = const { u64, ( () | { u64, ( () | bool | () ) } | () ) } { u64 undef, ( () | { u64, ( () | bool | () ) } | () ) undef } +// check: $(outer_undef=$VAL) = get_ptr ptr { u64, ( () | { u64, ( () | bool | () ) } | () ) } $ID, ptr { u64, ( () | { u64, ( () | bool | () ) } | () ) }, 0 // check: $(outer_tag=$VAL) = const u64 1 // check: $(outer_tagged=$VAL) = insert_value $outer_undef, { u64, ( () | { u64, ( () | bool | () ) } | () ) }, $outer_tag, 0 -// check: $(inner_undef=$VAL) = const { u64, ( () | bool | () ) } { u64 undef, ( () | bool | () ) undef } +// check: $(inner_undef=$VAL) = get_ptr ptr { u64, ( () | bool | () ) } $ID, ptr { u64, ( () | bool | () ) }, 0 // check: $(inner_tag=$VAL) = const u64 0 // check: $(inner_tagged=$VAL) = insert_value 
$inner_undef, { u64, ( () | bool | () ) }, $inner_tag, 0 // check: insert_value $outer_tagged, { u64, ( () | { u64, ( () | bool | () ) } | () ) }, $inner_tagged, 1 diff --git a/test/src/ir_generation/tests/enum_in_storage_read.sw b/test/src/ir_generation/tests/enum_in_storage_read.sw index b1d648a7c91..d018efa3521 100644 --- a/test/src/ir_generation/tests/enum_in_storage_read.sw +++ b/test/src/ir_generation/tests/enum_in_storage_read.sw @@ -37,13 +37,13 @@ impl StorageAccess for Contract { // check: local mut ptr [b256; 2] val_for_0_1 // check: local mut ptr [b256; 2] val_for_1_1 +// check: $(enum_undef=$VAL) = get_ptr ptr { u64, ( { u64, u64, u64, u64, u64 } | u64 ) } $ID, ptr { u64, ( { u64, u64, u64, u64, u64 } | u64 ) }, 0 // check: $(local_key_ptr=$VAL) = get_ptr mut ptr b256 key_for_0_0, ptr b256, 0 // check: $(key=$VAL) = const b256 0xd625ff6d8e88efd7bb3476e748e5d5935618d78bfc7eedf584fe909ce0809fc3 // check: store $key, ptr $local_key_ptr // check: $(stored_tag_ptr=$VAL) = state_load_word key ptr $local_key_ptr // check: $(stored_tag=$VAL) = bitcast $stored_tag_ptr to u64 -// check: $(enum_undef=$VAL) = const { u64, ( { u64, u64, u64, u64, u64 } | u64 ) } { u64 undef, ( { u64, u64, u64, u64, u64 } | u64 ) undef } // check: insert_value $enum_undef, { u64, ( { u64, u64, u64, u64, u64 } | u64 ) }, $stored_tag, 0 // check: $(local_key_ptr2=$VAL) = get_ptr mut ptr b256 key_for_0_1, ptr b256, 0 diff --git a/test/src/ir_generation/tests/enum_struct.sw b/test/src/ir_generation/tests/enum_struct.sw index 69c0321c01f..f3915603d4f 100644 --- a/test/src/ir_generation/tests/enum_struct.sw +++ b/test/src/ir_generation/tests/enum_struct.sw @@ -22,10 +22,10 @@ fn main() { // ::check-ir:: -// check: $(enum_undef=$VAL) = const { u64, ( () | { b256, bool, u64 } | () ) } { u64 undef, ( () | { b256, bool, u64 } | () ) undef } +// check: $(enum_undef=$VAL) = get_ptr ptr { u64, ( () | { b256, bool, u64 } | () ) } $ID, ptr { u64, ( () | { b256, bool, u64 } | () ) }, 0 // check: $(enum_tag=$VAL) = const u64 1 // check: $(enum_tagged=$VAL) = insert_value $enum_undef, { u64, ( () | { b256, bool, u64 } | () ) }, $enum_tag, 0 -// check: $(struct_undef=$VAL) = const { b256, bool, u64 } { b256 undef, bool undef, u64 undef } +// check: $(struct_undef=$VAL) = get_ptr ptr { b256, bool, u64 } $ID, ptr { b256, bool, u64 }, 0 // check: $(struct_0=$VAL) = insert_value $struct_undef, { b256, bool, u64 }, $VAL, 0 // check: $(struct_01=$VAL) = insert_value $struct_0, { b256, bool, u64 }, $VAL, 1, // check: $(struct_012=$VAL) = insert_value $struct_01, { b256, bool, u64 }, $VAL, 2 diff --git a/test/src/ir_generation/tests/fn_call.sw b/test/src/ir_generation/tests/fn_call.sw index 165961b89b7..680e5459aa2 100644 --- a/test/src/ir_generation/tests/fn_call.sw +++ b/test/src/ir_generation/tests/fn_call.sw @@ -4,17 +4,53 @@ fn a(x: u64) -> u64 { x } +fn b(x: u64, y: u64) -> u64 { + let var: bool = false; + if var { + x + } else { + y + } +} + fn main() -> u64 { a(0); - a(1) + a(1); + b(11, 22) } +// ::check-ir:: + // check: fn main() -> u64 // check: call // check: call +// check: call // check: ret u64 // check: fn $ID(x $MD: u64) -> u64 // check: entry: // check: ret u64 x // check: } + +// check: fn $ID(x $MD: u64, y $MD: u64) -> u64 +// check: local ptr bool var + +// ::check-asm:: +// +// regex: IMM=i\d+ +// regex: REG=\$[[:alpha:]][0-9[:alpha:]]* +// +// Matching fn a() here, which just returns its arg: +// +// check: move $(ret_val=$REG) $$$$arg0 +// check: move $$$$retv $ret_val +// check: jmp $$$$reta +// +// Matching 
fn b() here, which has a local bool var, initialised to false/$zero: +// +// check: move $(stack_ptr=$REG) $$sp +// check: cfei i8 +// check: sw $stack_ptr $$zero i0 +// ... +// check: cfsi i8 +// check: jmp $$$$reta diff --git a/test/src/ir_generation/tests/fn_call_noargs_nolocals.sw b/test/src/ir_generation/tests/fn_call_noargs_nolocals.sw new file mode 100644 index 00000000000..c84b1872a36 --- /dev/null +++ b/test/src/ir_generation/tests/fn_call_noargs_nolocals.sw @@ -0,0 +1,44 @@ +// This is more to test the ASM generation rather than IR. + +script; + +fn f() { + g() +} + +fn g() { +} + +fn main() { + f(); +} + +// ::check-ir:: + +// check: fn main() -> () + +// ::check-asm:: + +// regex: IMM=i\d+ +// regex: REG=\$r\d+ + +// Call a function: +// +// check: movi $$$$reta $IMM +// check: ji $IMM + +// Function calls other function, ignores result, returns unit/$zero. +// +// check: move $(reta_bk=$REG) $$$$reta +// check: movi $$$$reta $IMM +// check: ji $IMM +// check: move $$$$retv $$zero +// check: move $$$$reta $reta_bk +// check: jmp $$$$reta + +// Function returns unit. +// +// check: move $REG $$$$reta +// check: move $$$$retv $$zero +// check: move $$$$reta $REG +// check: jmp $$$$reta diff --git a/test/src/ir_generation/tests/fn_call_nolocals.sw b/test/src/ir_generation/tests/fn_call_nolocals.sw new file mode 100644 index 00000000000..1dce601beea --- /dev/null +++ b/test/src/ir_generation/tests/fn_call_nolocals.sw @@ -0,0 +1,39 @@ +// This is more to test the ASM generation rather than IR. + +script; + +fn add(lhs: u64, rhs: u64) -> u64 { + asm (l: lhs, r: rhs, x) { + add x r l; + x: u64 + } +} + +fn f(a: u64, b: u64, c: u64) -> u64 { + add(a, add(b, c)) +} + +fn g(x: u64, y: u64, z: u64) -> u64 { + f(f(x, x, y), f(y, y, z), f(z, z, z)) +} + +fn main() -> u64 { + g(1, 10, 100) +} + +// ::check-ir:: + +// check: fn main() -> u64 + +// ::check-asm:: + +// regex: IMM=i\d+ +// regex: REG=\$[[:alpha:]][0-9[:alpha:]]* + +// check: move $$$$arg0 $REG +// check: move $$$$arg1 $REG +// check: move $$$$arg2 $REG +// check: movi $$$$reta $IMM +// check: ji $IMM +// check: move $(ret_val=$REG) $$$$retv +// check: ret $ret_val diff --git a/test/src/ir_generation/tests/get_storage_key.sw b/test/src/ir_generation/tests/get_storage_key.sw index 558e477e9ac..1635dfa3a87 100644 --- a/test/src/ir_generation/tests/get_storage_key.sw +++ b/test/src/ir_generation/tests/get_storage_key.sw @@ -28,8 +28,8 @@ impl GetStorageKeyTest for Contract { } // check: fn foo1<2994c98e>() -> b256 -// nextln: entry: -// nextln: $(empty_struct_val=$VAL) = const { } { } +// check: entry: +// nextln: $(empty_struct_val=$VAL) = get_ptr ptr { } $ID, ptr { }, 0 // nextln: $(res=$VAL) = call $(fn_name=$ID)($empty_struct_val) // nextln: ret b256 $res diff --git a/test/src/ir_generation/tests/shadowed_locals.sw b/test/src/ir_generation/tests/shadowed_locals.sw index af0b58261ce..22a46f93392 100644 --- a/test/src/ir_generation/tests/shadowed_locals.sw +++ b/test/src/ir_generation/tests/shadowed_locals.sw @@ -23,7 +23,7 @@ fn main() -> u64 { // check: $(a__ptr=$VAL) = get_ptr ptr u64 a_, ptr u64, 0 // check: store $int_val, ptr $a__ptr -// check: $(struct_undef=$VAL) = const { u64 } { u64 undef } +// check: $(struct_undef=$VAL) = get_ptr ptr { u64 } $ID, ptr { u64 }, 0 // check: $(struct_set=$VAL) = insert_value $struct_undef, { u64 }, v9, 0 // check: $(a___ptr=$VAL) = get_ptr ptr { u64 } a__, ptr { u64 }, 0 // check: store $struct_set, ptr $a___ptr diff --git a/test/src/ir_generation/tests/simple_contract_call.sw 
b/test/src/ir_generation/tests/simple_contract_call.sw index 34d0931199e..a8460710afb 100644 --- a/test/src/ir_generation/tests/simple_contract_call.sw +++ b/test/src/ir_generation/tests/simple_contract_call.sw @@ -45,7 +45,7 @@ fn main() -> u64 { // BUILD THE PARAMS: contract id, selector and immediate argument. // check: $(get_u64_arg=$VAL) = const u64 1111 // check: $(get_u64_arg_bitcast=$VAL) = bitcast $get_u64_arg to u64 -// check: $(get_u64_params_undef=$VAL) = const { b256, u64, u64 } { b256 undef, u64 undef, u64 undef } +// check: $(get_u64_params_undef=$VAL) = get_ptr ptr { b256, u64, u64 } $ID, ptr { b256, u64, u64 }, 0 // check: $(contract_id=$VAL) = const b256 0x0c1c50c2bf5ba4bb351b4249a2f5e7d86556fcb4a6ae90465ff6c86126eeb3c0 // check: $(get_u64_params_0=$VAL) = insert_value $get_u64_params_undef, { b256, u64, u64 }, $contract_id, 0 // check: $(get_u64_selector=$VAL) = const u64 2559618804 @@ -66,7 +66,7 @@ fn main() -> u64 { // check: $(get_b256_arg_lit=$VAL) = const b256 0x3333333333333333333333333333333333333333333333333333333333333333 // check: store $get_b256_arg_lit, ptr $get_b256_arg // check: $(get_b256_arg_ptr=$VAL) = get_ptr ptr b256 arg_for_get_b256, ptr u64, 0 -// check: $(get_b256_params_undef=$VAL) = const { b256, u64, u64 } { b256 undef, u64 undef, u64 undef } +// check: $(get_b256_params_undef=$VAL) = get_ptr ptr { b256, u64, u64 } $ID, ptr { b256, u64, u64 }, 0 // check: $(contract_id=$VAL) = const b256 0x0c1c50c2bf5ba4bb351b4249a2f5e7d86556fcb4a6ae90465ff6c86126eeb3c0 // check: $(get_b256_params_0=$VAL) = insert_value $get_b256_params_undef, { b256, u64, u64 }, $contract_id, 0 // check: $(get_b256_selector=$VAL) = const u64 1108491158 @@ -90,7 +90,7 @@ fn main() -> u64 { // check: $(get_s_arg_y=$VAL) = const b256 0x5555555555555555555555555555555555555555555555555555555555555555 // check: $VAL = insert_value $get_s_arg_0, { u64, b256 }, $get_s_arg_y, 1 // check: $(get_s_arg_ptr=$VAL) = get_ptr mut ptr { u64, b256 } args_struct_for_get_s, ptr u64, 0 -// check: $(get_s_params_undef=$VAL) = const { b256, u64, u64 } { b256 undef, u64 undef, u64 undef } +// check: $(get_s_params_undef=$VAL) = get_ptr ptr { b256, u64, u64 } $ID, ptr { b256, u64, u64 }, 0 // check: $(contract_id=$VAL) = const b256 0x0c1c50c2bf5ba4bb351b4249a2f5e7d86556fcb4a6ae90465ff6c86126eeb3c0 // check: $(get_s_params_0=$VAL) = insert_value $get_s_params_undef, { b256, u64, u64 }, $contract_id, 0 // check: $(get_s_selector=$VAL) = const u64 4234334249 diff --git a/test/src/ir_generation/tests/struct.sw b/test/src/ir_generation/tests/struct.sw index b538148ab74..d685e16bfd1 100644 --- a/test/src/ir_generation/tests/struct.sw +++ b/test/src/ir_generation/tests/struct.sw @@ -15,7 +15,7 @@ struct Record { // check: local ptr { u64, u64 } record -// check: $(record_undef=$VAL) = const { u64, u64 } { u64 undef, u64 undef } +// check: $(record_undef=$VAL) = get_ptr ptr { u64, u64 } $ID, ptr { u64, u64 }, 0 // check: $(forty=$VAL) = const u64 40 // check: $(record_0=$VAL) = insert_value $record_undef, { u64, u64 }, $forty, 0 // check: $(two=$VAL) = const u64 2 diff --git a/test/src/ir_generation/tests/struct_enum.sw b/test/src/ir_generation/tests/struct_enum.sw index 8f1a1543ba6..a295ed1e123 100644 --- a/test/src/ir_generation/tests/struct_enum.sw +++ b/test/src/ir_generation/tests/struct_enum.sw @@ -21,10 +21,10 @@ enum Fruit { // check: local ptr { bool, { u64, ( () | () | u64 ) } } record -// check: $(enum_undef=$VAL) = const { u64, ( () | () | u64 ) } { u64 undef, ( () | () | u64 ) undef } +// check: 
$(enum_undef=$VAL) = get_ptr ptr { u64, ( () | () | u64 ) } $ID, ptr { u64, ( () | () | u64 ) }, 0 // check: $(zero=$VAL) = const u64 0 // check: $(enum_tagged=$VAL) = insert_value $enum_undef, { u64, ( () | () | u64 ) }, $zero, 0 -// check: $(struct_undef=$VAL) = const { bool, { u64, ( () | () | u64 ) } } { bool undef, { u64, ( () | () | u64 ) } { u64 undef, ( () | () | u64 ) undef } } +// check: $(struct_undef=$VAL) = get_ptr ptr { bool, { u64, ( () | () | u64 ) } } $ID, ptr { bool, { u64, ( () | () | u64 ) } }, 0 // check: $(f=$VAL) = const bool false // check: $(struct_0=$VAL) = insert_value $struct_undef, { bool, { u64, ( () | () | u64 ) } }, $f, 0 // check: $(struct_init=$VAL) = insert_value $struct_0, { bool, { u64, ( () | () | u64 ) } }, $enum_tagged, 1 diff --git a/test/src/ir_generation/tests/struct_struct.sw b/test/src/ir_generation/tests/struct_struct.sw index 1ca8d19ff63..1386f2060a9 100644 --- a/test/src/ir_generation/tests/struct_struct.sw +++ b/test/src/ir_generation/tests/struct_struct.sw @@ -23,12 +23,12 @@ struct Entry { // check: local ptr { b256, { bool, u64 } } record -// check: $(entry_undef=$VAL) = const { bool, u64 } { bool undef, u64 undef } +// check: $(entry_undef=$VAL) = get_ptr ptr { bool, u64 } $ID, ptr { bool, u64 }, 0 // check: $(t=$VAL) = const bool true // check: $(entry_0=$VAL) = insert_value $entry_undef, { bool, u64 }, $t, 0 // check: $(sevsix=$VAL) = const u64 76 // check: $(entry=$VAL) = insert_value $entry_0, { bool, u64 }, $sevsix, 1 -// check: $(record_undef=$VAL) = const { b256, { bool, u64 } } { b256 undef, { bool, u64 } { bool undef, u64 undef } } +// check: $(record_undef=$VAL) = get_ptr ptr { b256, { bool, u64 } } $ID, ptr { b256, { bool, u64 } }, 0 // check: $(b256_lit=$VAL) = const b256 0x0102030405060708010203040506070801020304050607080102030405060708 // check: $(record_0=$VAL) = insert_value $record_undef, { b256, { bool, u64 } }, $b256_lit, 0 // check: $(record=$VAL) = insert_value $record_0, { b256, { bool, u64 } }, $entry, 1 diff --git a/test/src/ir_generation/tests/unit_type_variants.sw b/test/src/ir_generation/tests/unit_type_variants.sw index b8dd35684f1..dc352cf7d81 100644 --- a/test/src/ir_generation/tests/unit_type_variants.sw +++ b/test/src/ir_generation/tests/unit_type_variants.sw @@ -13,8 +13,8 @@ fn main() -> E { // Since all variants are unit the tagged union has no value, it's just a tag. // check: fn main() -> { u64 } -// nextln: entry: -// nextln: $(enum_undef=$VAL) = const { u64 } { u64 undef } +// check: entry: +// nextln: $(enum_undef=$VAL) = get_ptr ptr { u64 } $ID, ptr { u64 }, 0 // nextln: $(two=$VAL) = const u64 2 // nextln: $(enum=$VAL) = insert_value $enum_undef, { u64 }, $two, 0 // nextln: ret { u64 } $enum diff --git a/test/src/sdk-harness/test_projects/storage/mod.rs b/test/src/sdk-harness/test_projects/storage/mod.rs index f3d643a0259..5ae3dcef754 100644 --- a/test/src/sdk-harness/test_projects/storage/mod.rs +++ b/test/src/sdk-harness/test_projects/storage/mod.rs @@ -214,3 +214,15 @@ async fn can_store_array() { let result = instance.methods().get_array().call().await.unwrap(); assert_eq!(result.value, a); } + +// TEMPORARILY DISABLED. +// +// This test can be reinstated when https://github.com/FuelLabs/sway/pull/2885 has been merged (and +// storage reads are made to the heap). 
+//
+//#[tokio::test]
+//async fn can_store_non_inlined() {
+//    let instance = get_test_storage_instance().await;
+//    let result = instance.storage_in_call().call().await.unwrap();
+//    assert_eq!(result.value, 333);
+//}
diff --git a/test/src/sdk-harness/test_projects/storage/src/main.sw b/test/src/sdk-harness/test_projects/storage/src/main.sw
index 4196a1fbb22..4612cac0406 100644
--- a/test/src/sdk-harness/test_projects/storage/src/main.sw
+++ b/test/src/sdk-harness/test_projects/storage/src/main.sw
@@ -1,6 +1,6 @@
 contract;
 
-use std::storage::{get, store};
+use std::{context::registers::stack_ptr, storage::{get, store}};
 
 pub enum MediumEnum {
     One: u64,
@@ -50,6 +50,7 @@ const S_11: b256 = 0x00000000000000000000000000000000000000000000000000000000000
 const S_12: b256 = 0x0000000000000000000000000000000000000000000000000000000000000012;
 const S_13: b256 = 0x0000000000000000000000000000000000000000000000000000000000000013;
 const S_14: b256 = 0x0000000000000000000000000000000000000000000000000000000000000014;
+const S_15: b256 = 0x0000000000000000000000000000000000000000000000000000000000000015;
 
 abi StorageTest {
     #[storage(write)]
@@ -113,6 +114,9 @@ abi StorageTest {
     fn store_array();
     #[storage(read)]
     fn get_array() -> [b256; 3];
+
+    #[storage(read, write)]
+    fn storage_in_call() -> u64;
 }
 
 impl StorageTest for Contract {
@@ -261,4 +265,40 @@ impl StorageTest for Contract {
     fn get_array() -> [b256; 3] {
         get(S_14)
     }
+
+    #[storage(read, write)]
+    fn storage_in_call() -> u64 {
+        // The point of this test is to call the storage functions from a non-entry-point
+        // function, one which is _not_ inlined into the entry function. It must preserve the
+        // stack properly and not leak data structures read or written on the stack, else the
+        // function call frame will be corrupt.
+        //
+        // To avoid inlining, the function must be called multiple times and be sufficiently
+        // large.
+        let pre_sp = stack_ptr();
+        let res = non_inlined_function(456_u32) && non_inlined_function(654_u32);
+        let post_sp = stack_ptr();
+
+        if pre_sp != post_sp {
+            111 // Code to indicate bad stack (it would probably crash before here though).
+        } else if !res {
+            222 // Code to indicate storage I/O failure.
+        } else {
+            333 // Code for success - something non-trivial so we can't accidentally succeed.
+        }
+    }
 }
+
+#[storage(read, write)]
+fn non_inlined_function(arg: u32) -> bool {
+    // By storing and reading from a large complex data structure we're ensuring that this function
+    // is too large to be inlined. The stored value type must be a reference type too, to ensure
+    // the use of memory (not a register) to read it back.
+    store(S_15, LargeStruct {
+        x: arg,
+        y: 0x9999999999999999999999999999999999999999999999999999999999999999,
+        z: arg,
+    });
+
+    let ls: LargeStruct = get(S_15);
+    ls.x == arg
+}