diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index e70df2d..5253523 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -45,7 +45,7 @@ jobs: - name: Run tests (wasm32-wasip1) run: | echo "Running wasm32-wasip1 tests with wasmtime " - cargo test --verbose --target wasm32-wasip1 --features wasmtime + cargo test --verbose --target wasm32-wasip1 --features wasmtime --release - name: Check formatting run: cargo fmt --all --check diff --git a/Cargo.lock b/Cargo.lock index 8762523..c1f7013 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -78,6 +78,15 @@ version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +[[package]] +name = "arrayvec" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" +dependencies = [ + "serde", +] + [[package]] name = "autocfg" version = "1.4.0" @@ -137,6 +146,7 @@ name = "chiwawa" version = "0.1.0" dependencies = [ "anyhow", + "arrayvec", "bincode", "byteorder", "clap", diff --git a/Cargo.toml b/Cargo.toml index d847c5b..3a8bfad 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,6 +24,7 @@ trace = [] [dependencies] anyhow = "1.0.86" +arrayvec = { version = "0.7", features = ["serde"] } byteorder = "1.5.0" clap = { version = "4.5.7", features = ["derive"] } fancy-regex = "0.14.0" diff --git a/doc/folding.md b/doc/folding.md new file mode 100644 index 0000000..fca33fb --- /dev/null +++ b/doc/folding.md @@ -0,0 +1,78 @@ +# Operand Folding + +Operand folding is a preprocessing optimization that identifies patterns of consecutive instructions that can be merged into a single operation. This eliminates intermediate register allocations and reduces dispatch overhead. 
+ +``` +Before folding: + i32.const 42 ; r0 = 42 + local.set 0 ; local[0] = r0 + +After folding: + i32.const 42 -> local[0] ; store 42 directly to local[0] +``` + +## Folding Types + +### Source Folding + +Folds constant values and local.get operations into consuming instructions. + +``` +Before: + i32.const 10 ; r0 = 10 + i32.const 20 ; r1 = 20 + i32.add ; r2 = r0 + r1 + +After: + i32.add (const 10), (const 20) -> r0 +``` + +Supported source operands: +- `i32.const`, `i64.const`, `f32.const`, `f64.const` +- `local.get` (typed: i32, i64, f32, f64) + +### Destination Folding + +Folds `local.set` into the preceding instruction that produces the value. + +``` +Before: + i32.add ; r0 = a + b + local.set 0 ; local[0] = r0 + +After: + i32.add -> local[0] ; result directly to local +``` + +When destination folding is applied, the instruction uses `RegOrLocal::Local` instead of `RegOrLocal::Reg` for its destination. + +### Address Folding (Memory Operations) + +For memory load/store operations, folds constant addresses. + +``` +Before: + i32.const 100 ; r0 = 100 (address) + i32.load ; r1 = memory[r0] + +After: + i32.load (addr: const 100) -> r1 +``` + +## Implementation + +Folding is performed during the preprocessing phase using a peek-ahead mechanism: + +1. **Pending Operand Stack**: When a foldable source instruction (const, local.get) is encountered, it is pushed to a pending stack instead of generating a register instruction. + +2. **Consumer Check**: When a consuming instruction is processed, it checks the pending stack for compatible operands. + +3. **Destination Check**: After processing an instruction, the parser peeks ahead to check if the next instruction is `local.set`. If so, the destination is changed from register to local. 
+ +## Limitations + +- Folding only occurs for immediately adjacent instructions +- Control flow instructions (block, loop, if) break folding chains +- Reference types (funcref, externref) are not folded +- Type mismatch between pending operand and consumer prevents folding + diff --git a/src/execution/elem.rs b/src/execution/elem.rs index 411fe36..f7d98e1 100644 --- a/src/execution/elem.rs +++ b/src/execution/elem.rs @@ -25,7 +25,7 @@ impl ElemAddr { .map(|i| Ref::FuncAddr(funcs.get_by_idx(FuncIdx(*i as u32)).clone())) .collect(); ElemAddr(Rc::new(RefCell::new(ElemInst { - _type_: type_.clone(), + _type_: *type_, _elem: elem, }))) } diff --git a/src/execution/func.rs b/src/execution/func.rs index 2e3dfea..08273ac 100644 --- a/src/execution/func.rs +++ b/src/execution/func.rs @@ -4,13 +4,15 @@ use super::module::*; use super::value::{Val, WasiFuncAddr}; use crate::error::RuntimeError; use crate::structure::{module::*, types::*}; -use std::cell::{Ref, RefCell}; +use std::cell::UnsafeCell; use std::fmt::{self, Debug}; use std::rc::{Rc, Weak}; /// Reference-counted handle to a function instance. +/// Uses UnsafeCell for zero-cost access in the interpreter hot path. +/// Safety: WebAssembly execution is single-threaded and operations don't overlap. #[derive(Clone)] -pub struct FuncAddr(Rc>); +pub struct FuncAddr(Rc>); /// Function instance variants: runtime (Wasm), host, or WASI. pub enum FuncInst { @@ -31,10 +33,9 @@ pub enum FuncInst { impl Debug for FuncAddr { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self.0.try_borrow() { - Ok(guard) => write!(f, "FuncAddr({:?})", *guard), - Err(_) => write!(f, "FuncAddr()"), - } + // Safety: Single-threaded access + let inst = unsafe { &*self.0.get() }; + write!(f, "FuncAddr({:?})", inst) } } @@ -74,7 +75,7 @@ impl Debug for FuncInst { impl FuncAddr { /// Allocates a placeholder function (replaced later during instantiation). 
pub fn alloc_empty() -> FuncAddr { - FuncAddr(Rc::new(RefCell::new(FuncInst::RuntimeFunc { + FuncAddr(Rc::new(UnsafeCell::new(FuncInst::RuntimeFunc { type_: FuncType { params: Vec::new(), results: Vec::new(), @@ -93,7 +94,7 @@ impl FuncAddr { /// Allocates a WASI function instance. pub fn alloc_wasi(wasi_func_addr: WasiFuncAddr) -> FuncAddr { let func_type = wasi_func_addr.func_type.to_func_type(); - FuncAddr(Rc::new(RefCell::new(FuncInst::WasiFunc { + FuncAddr(Rc::new(UnsafeCell::new(FuncInst::WasiFunc { type_: func_type, wasi_func_addr, }))) @@ -102,7 +103,7 @@ impl FuncAddr { /// Replaces placeholder with actual function definition. pub fn replace(&self, func: Func, module: Weak) { let upgraded_module = module.upgrade().expect("Module weak ref expired"); - let func_type = upgraded_module.types.get_by_idx(func.type_.clone()).clone(); + let func_type = upgraded_module.types.get_by_idx(func.type_).clone(); drop(upgraded_module); let new_inst = FuncInst::RuntimeFunc { @@ -110,21 +111,30 @@ impl FuncAddr { module: module, code: func, }; - *self.0.borrow_mut() = new_inst; + // Safety: Single-threaded access, no overlapping borrows + unsafe { + *self.0.get() = new_inst; + } } - /// Returns the function's type signature. - pub fn func_type(&self) -> FuncType { - match &*self.0.borrow() { - FuncInst::RuntimeFunc { type_, .. } => type_.clone(), - FuncInst::HostFunc { type_, .. } => type_.clone(), - FuncInst::WasiFunc { type_, .. } => type_.clone(), + /// Returns a reference to the function's type signature. + /// Zero-copy access - no allocation. + #[inline] + pub fn func_type(&self) -> &FuncType { + // Safety: Single-threaded access, no overlapping mutable access + let inst = unsafe { &*self.0.get() }; + match inst { + FuncInst::RuntimeFunc { type_, .. } => type_, + FuncInst::HostFunc { type_, .. } => type_, + FuncInst::WasiFunc { type_, .. } => type_, } } /// Extracts runtime function details if this is a Wasm function. 
pub fn get_runtime_func_details(&self) -> Option<(FuncType, Weak, Func)> { - match &*self.0.borrow() { + // Safety: Single-threaded access + let inst = unsafe { &*self.0.get() }; + match inst { FuncInst::RuntimeFunc { type_, module, @@ -141,7 +151,9 @@ impl FuncAddr { FuncType, Rc) -> Result, RuntimeError>>, )> { - match &*self.0.borrow() { + // Safety: Single-threaded access + let inst = unsafe { &*self.0.get() }; + match inst { FuncInst::HostFunc { type_, host_code } => Some((type_.clone(), host_code.clone())), _ => None, } @@ -149,7 +161,9 @@ impl FuncAddr { /// Extracts WASI function details if this is a WASI function. pub fn get_wasi_func_details(&self) -> Option<(FuncType, WasiFuncAddr)> { - match &*self.0.borrow() { + // Safety: Single-threaded access + let inst = unsafe { &*self.0.get() }; + match inst { FuncInst::WasiFunc { type_, wasi_func_addr, @@ -158,13 +172,17 @@ impl FuncAddr { } } - /// Returns a borrow of the underlying function instance. - pub fn read_lock(&self) -> Ref { - self.0.borrow() + /// Returns a reference to the underlying function instance. + /// # Safety + /// Caller must ensure no mutable access occurs during the lifetime of the reference. + #[inline] + pub fn read_lock(&self) -> &FuncInst { + // Safety: Single-threaded access, caller ensures no mutable access + unsafe { &*self.0.get() } } /// Returns a reference to the inner Rc. - pub fn get_rc(&self) -> &Rc> { + pub fn get_rc(&self) -> &Rc> { &self.0 } } diff --git a/src/execution/global.rs b/src/execution/global.rs index 398e9c3..f549354 100644 --- a/src/execution/global.rs +++ b/src/execution/global.rs @@ -25,7 +25,7 @@ impl GlobalAddr { pub fn new(type_: &GlobalType, value: Val) -> GlobalAddr { GlobalAddr { global_inst: Rc::new(RefCell::new(GlobalInst { - _type_: type_.clone(), + _type_: *type_, value: value, })), } diff --git a/src/execution/mem.rs b/src/execution/mem.rs index 0a1d737..5f25141 100644 --- a/src/execution/mem.rs +++ b/src/execution/mem.rs @@ -1,18 +1,16 @@ //! 
Linear memory instances and load/store operations. -use crate::error::RuntimeError; use crate::structure::{instructions::Memarg, types::*}; -use byteorder::*; use serde::{Deserialize, Serialize}; -use std::cell::RefCell; -use std::io::Cursor; +use std::cell::UnsafeCell; use std::rc::Rc; -use typenum::*; /// Reference-counted handle to a memory instance. +/// Uses UnsafeCell for zero-cost memory access in the interpreter hot path. +/// Safety: WebAssembly execution is single-threaded and operations don't overlap. #[derive(Clone, Debug)] pub struct MemAddr { - mem_inst: Rc>, + mem_inst: Rc>, } /// Linear memory instance with bounds tracking. @@ -28,7 +26,7 @@ impl MemAddr { let min = (type_.0.min * 65536) as usize; let max = type_.0.max.map(|max| max); MemAddr { - mem_inst: Rc::new(RefCell::new(MemInst { + mem_inst: Rc::new(UnsafeCell::new(MemInst { _type_: MemType(Limits { min: min as u32, max, @@ -43,38 +41,41 @@ impl MemAddr { } /// Initializes memory region from data segment. - pub fn init(&self, offset: usize, init: &Vec) { - let mut addr_self = self.mem_inst.borrow_mut(); - addr_self.data[offset..offset + init.len()].copy_from_slice(init); + #[inline] + pub fn init(&self, offset: usize, init: &[u8]) { + // Safety: Single-threaded access, no overlapping borrows + let mem = unsafe { &mut *self.mem_inst.get() }; + mem.data[offset..offset + init.len()].copy_from_slice(init); } + /// Loads a typed value from memory at ptr + offset. - pub fn load(&self, arg: &Memarg, ptr: i32) -> Result { + /// No bounds checking - relies on host runtime for memory safety. + /// No heap allocation - reads directly from memory pointer. 
+ #[inline] + pub fn load(&self, arg: &Memarg, ptr: i32) -> T { let pos = (ptr as usize) + (arg.offset as usize); - let len = ::len(); - let raw = self.mem_inst.borrow(); - - let data = unsafe { - let slice = std::slice::from_raw_parts(raw.data.as_ptr().add(pos), len); - slice.to_vec() - }; - Ok(::from_byte(data)) + // Safety: Single-threaded access, no overlapping borrows + let mem = unsafe { &*self.mem_inst.get() }; + unsafe { T::read_from_ptr(mem.data.as_ptr().add(pos)) } } + /// Stores a typed value to memory at ptr + offset. - pub fn store(&self, arg: &Memarg, ptr: i32, data: T) -> Result<(), RuntimeError> { + /// No bounds checking - relies on host runtime for memory safety. + /// No heap allocation - writes directly to memory pointer. + #[inline] + pub fn store(&self, arg: &Memarg, ptr: i32, data: T) { let pos = (ptr as usize) + (arg.offset as usize); - let buf = ::to_byte(data); - let mut raw = self.mem_inst.borrow_mut(); - - unsafe { - std::ptr::copy_nonoverlapping(buf.as_ptr(), raw.data.as_mut_ptr().add(pos), buf.len()); - } - - Ok(()) + // Safety: Single-threaded access, no overlapping borrows + let mem = unsafe { &mut *self.mem_inst.get() }; + unsafe { data.write_to_ptr(mem.data.as_mut_ptr().add(pos)) } } /// Returns current memory size in pages (64KB each). + #[inline] pub fn mem_size(&self) -> i32 { - (self.mem_inst.borrow().data.len() / 65536) as i32 + // Safety: Single-threaded access + let mem = unsafe { &*self.mem_inst.get() }; + (mem.data.len() / 65536) as i32 } /// Grows memory by the given number of pages. Returns previous size or -1 on failure. 
@@ -82,10 +83,9 @@ impl MemAddr { let prev_size = self.mem_size(); let new = prev_size + size; - let max_pages = { - let guard = self.mem_inst.borrow(); - guard._type_.0.max - }; + // Safety: Single-threaded access + let mem = unsafe { &*self.mem_inst.get() }; + let max_pages = mem._type_.0.max; if let Some(max) = max_pages { if new > max as i32 { @@ -96,205 +96,222 @@ impl MemAddr { if new > 65536 { -1 } else { - self.mem_inst - .borrow_mut() - .data - .resize(new as usize * 65536, 0); - + // Safety: Single-threaded access, no overlapping borrows + let mem = unsafe { &mut *self.mem_inst.get() }; + mem.data.resize(new as usize * 65536, 0); prev_size } } /// Returns a copy of all memory contents. - pub fn get_data(&self) -> Result, RuntimeError> { - let guard = self.mem_inst.borrow(); - Ok(guard.data.clone()) + #[inline] + pub fn get_data(&self) -> Vec { + // Safety: Single-threaded access + let mem = unsafe { &*self.mem_inst.get() }; + mem.data.clone() } /// Store multiple bytes at once (bulk operation) - pub fn store_bytes(&self, ptr: i32, data: &[u8]) -> Result<(), RuntimeError> { + /// No bounds checking - relies on host runtime for memory safety. + #[inline] + pub fn store_bytes(&self, ptr: i32, data: &[u8]) { let pos = ptr as usize; - let mut raw = self.mem_inst.borrow_mut(); + // Safety: Single-threaded access, no overlapping borrows + let mem = unsafe { &mut *self.mem_inst.get() }; unsafe { std::ptr::copy_nonoverlapping( data.as_ptr(), - raw.data.as_mut_ptr().add(pos), + mem.data.as_mut_ptr().add(pos), data.len(), ); } - - Ok(()) } /// Replaces all memory contents (used during restore). - pub fn set_data(&self, data: Vec) -> Result<(), RuntimeError> { - let mut guard = self.mem_inst.borrow_mut(); - guard.data = data; - Ok(()) + #[inline] + pub fn set_data(&self, data: Vec) { + // Safety: Single-threaded access, no overlapping borrows + let mem = unsafe { &mut *self.mem_inst.get() }; + mem.data = data; } /// Copies len bytes from src to dest within memory. 
- pub fn memory_copy(&self, dest: i32, src: i32, len: i32) -> Result<(), RuntimeError> { + /// No bounds checking - relies on host runtime for memory safety. + #[inline] + pub fn memory_copy(&self, dest: i32, src: i32, len: i32) { let dest_pos = dest as usize; let src_pos = src as usize; let len_usize = len as usize; - let mut raw = self.mem_inst.borrow_mut(); + // Safety: Single-threaded access, no overlapping borrows + let mem = unsafe { &mut *self.mem_inst.get() }; - if len_usize > 0 { - unsafe { - let src_ptr = raw.data.as_ptr().add(src_pos); - let dest_ptr = raw.data.as_mut_ptr().add(dest_pos); - std::ptr::copy(src_ptr, dest_ptr, len_usize); - } + unsafe { + let src_ptr = mem.data.as_ptr().add(src_pos); + let dest_ptr = mem.data.as_mut_ptr().add(dest_pos); + std::ptr::copy(src_ptr, dest_ptr, len_usize); } - - Ok(()) } /// Fills len bytes starting at dest with val. - pub fn memory_fill(&self, dest: i32, val: u8, len: i32) -> Result<(), RuntimeError> { + /// No bounds checking - relies on host runtime for memory safety. + #[inline] + pub fn memory_fill(&self, dest: i32, val: u8, len: i32) { let dest_pos = dest as usize; let len_usize = len as usize; - let mut raw = self.mem_inst.borrow_mut(); + // Safety: Single-threaded access, no overlapping borrows + let mem = unsafe { &mut *self.mem_inst.get() }; - if len_usize > 0 { - unsafe { - std::ptr::write_bytes(raw.data.as_mut_ptr().add(dest_pos), val, len_usize); - } + unsafe { + std::ptr::write_bytes(mem.data.as_mut_ptr().add(dest_pos), val, len_usize); } + } - Ok(()) + /// Returns a raw pointer to the memory data for direct access. + /// # Safety + /// Caller must ensure no mutable aliases exist during use. + #[inline] + pub unsafe fn get_data_ptr(&self) -> *const u8 { + (*self.mem_inst.get()).data.as_ptr() } - /// Returns a direct borrow of the memory instance. 
- pub fn get_memory_direct_access(&self) -> std::cell::Ref { - self.mem_inst.borrow() + /// Returns a mutable raw pointer to the memory data for direct access. + /// # Safety + /// Caller must ensure no other references exist during use. + #[inline] + pub unsafe fn get_data_mut_ptr(&self) -> *mut u8 { + (*self.mem_inst.get()).data.as_mut_ptr() + } + + /// Returns the length of the memory data. + #[inline] + pub fn data_len(&self) -> usize { + // Safety: Single-threaded access + let mem = unsafe { &*self.mem_inst.get() }; + mem.data.len() + } + + /// Returns a reference to the memory instance for direct access. + /// This is used by WASI functions that need to read memory directly. + /// # Safety + /// The returned reference must not be held across operations that could + /// mutate memory. Caller is responsible for ensuring no aliasing violations. + #[inline] + pub fn get_memory_direct_access(&self) -> &MemInst { + // Safety: Single-threaded access, caller must ensure no overlapping mutable access + unsafe { &*self.mem_inst.get() } } } /// Trait for types that can be loaded/stored from memory. +/// Uses direct pointer access to avoid heap allocations. pub trait ByteMem: Sized { - /// Size in bytes. - fn len() -> usize; - /// Deserialize from little-endian bytes. - fn from_byte(data: Vec) -> Self; - /// Serialize to little-endian bytes. - fn to_byte(self) -> Vec; + /// Read value directly from memory pointer (little-endian). + /// # Safety + /// Caller must ensure ptr is valid and properly aligned for the type. + unsafe fn read_from_ptr(ptr: *const u8) -> Self; + + /// Write value directly to memory pointer (little-endian). + /// # Safety + /// Caller must ensure ptr is valid and has enough space. 
+ unsafe fn write_to_ptr(self, ptr: *mut u8); } impl ByteMem for i8 { - fn len() -> usize { - consts::U1::to_usize() + #[inline] + unsafe fn read_from_ptr(ptr: *const u8) -> i8 { + *ptr as i8 } - fn from_byte(data: Vec) -> i8 { - let mut reader = Cursor::new(data.as_slice()); - reader.read_i8().unwrap() - } - fn to_byte(self) -> Vec { - vec![self as u8] + #[inline] + unsafe fn write_to_ptr(self, ptr: *mut u8) { + *ptr = self as u8; } } + impl ByteMem for u8 { - fn len() -> usize { - consts::U1::to_usize() - } - fn from_byte(data: Vec) -> u8 { - let mut reader = Cursor::new(data.as_slice()); - reader.read_u8().unwrap() + #[inline] + unsafe fn read_from_ptr(ptr: *const u8) -> u8 { + *ptr } - fn to_byte(self) -> Vec { - vec![self] + #[inline] + unsafe fn write_to_ptr(self, ptr: *mut u8) { + *ptr = self; } } impl ByteMem for i16 { - fn len() -> usize { - consts::U2::to_usize() + #[inline] + unsafe fn read_from_ptr(ptr: *const u8) -> i16 { + std::ptr::read_unaligned(ptr as *const i16).to_le() } - fn from_byte(data: Vec) -> i16 { - let mut reader = Cursor::new(data.as_slice()); - reader.read_i16::().unwrap() - } - fn to_byte(self) -> Vec { - self.to_le_bytes().to_vec() + #[inline] + unsafe fn write_to_ptr(self, ptr: *mut u8) { + std::ptr::write_unaligned(ptr as *mut i16, self.to_le()); } } impl ByteMem for u16 { - fn len() -> usize { - consts::U2::to_usize() - } - fn from_byte(data: Vec) -> u16 { - let mut reader = Cursor::new(data.as_slice()); - reader.read_u16::().unwrap() + #[inline] + unsafe fn read_from_ptr(ptr: *const u8) -> u16 { + std::ptr::read_unaligned(ptr as *const u16).to_le() } - fn to_byte(self) -> Vec { - self.to_le_bytes().to_vec() + #[inline] + unsafe fn write_to_ptr(self, ptr: *mut u8) { + std::ptr::write_unaligned(ptr as *mut u16, self.to_le()); } } impl ByteMem for i32 { - fn len() -> usize { - consts::U4::to_usize() + #[inline] + unsafe fn read_from_ptr(ptr: *const u8) -> i32 { + std::ptr::read_unaligned(ptr as *const i32).to_le() } - fn 
from_byte(data: Vec) -> i32 { - let mut reader = Cursor::new(data.as_slice()); - reader.read_i32::().unwrap() - } - fn to_byte(self) -> Vec { - self.to_le_bytes().to_vec() + #[inline] + unsafe fn write_to_ptr(self, ptr: *mut u8) { + std::ptr::write_unaligned(ptr as *mut i32, self.to_le()); } } impl ByteMem for u32 { - fn len() -> usize { - consts::U4::to_usize() - } - fn from_byte(data: Vec) -> u32 { - let mut reader = Cursor::new(data.as_slice()); - reader.read_u32::().unwrap() + #[inline] + unsafe fn read_from_ptr(ptr: *const u8) -> u32 { + std::ptr::read_unaligned(ptr as *const u32).to_le() } - fn to_byte(self) -> Vec { - self.to_le_bytes().to_vec() + #[inline] + unsafe fn write_to_ptr(self, ptr: *mut u8) { + std::ptr::write_unaligned(ptr as *mut u32, self.to_le()); } } impl ByteMem for i64 { - fn len() -> usize { - consts::U8::to_usize() + #[inline] + unsafe fn read_from_ptr(ptr: *const u8) -> i64 { + std::ptr::read_unaligned(ptr as *const i64).to_le() } - fn from_byte(data: Vec) -> i64 { - let mut reader = Cursor::new(data.as_slice()); - reader.read_i64::().unwrap() - } - fn to_byte(self) -> Vec { - self.to_le_bytes().to_vec() + #[inline] + unsafe fn write_to_ptr(self, ptr: *mut u8) { + std::ptr::write_unaligned(ptr as *mut i64, self.to_le()); } } impl ByteMem for f32 { - fn len() -> usize { - consts::U4::to_usize() - } - fn from_byte(data: Vec) -> f32 { - let mut reader = Cursor::new(data.as_slice()); - reader.read_f32::().unwrap() + #[inline] + unsafe fn read_from_ptr(ptr: *const u8) -> f32 { + f32::from_bits(std::ptr::read_unaligned(ptr as *const u32).to_le()) } - fn to_byte(self) -> Vec { - self.to_le_bytes().to_vec() + #[inline] + unsafe fn write_to_ptr(self, ptr: *mut u8) { + std::ptr::write_unaligned(ptr as *mut u32, self.to_bits().to_le()); } } impl ByteMem for f64 { - fn len() -> usize { - consts::U8::to_usize() - } - fn from_byte(data: Vec) -> f64 { - let mut reader = Cursor::new(data.as_slice()); - reader.read_f64::().unwrap() + #[inline] + unsafe 
fn read_from_ptr(ptr: *const u8) -> f64 { + f64::from_bits(std::ptr::read_unaligned(ptr as *const u64).to_le()) } - fn to_byte(self) -> Vec { - self.to_le_bytes().to_vec() + #[inline] + unsafe fn write_to_ptr(self, ptr: *mut u8) { + std::ptr::write_unaligned(ptr as *mut u64, self.to_bits().to_le()); } } diff --git a/src/execution/migration.rs b/src/execution/migration.rs index 285fbca..525a4df 100644 --- a/src/execution/migration.rs +++ b/src/execution/migration.rs @@ -99,7 +99,7 @@ pub fn checkpoint>( // 1. Gather Memory state let memory_data = if let Some(mem_addr) = mem_addrs.get(0) { - mem_addr.get_data()? + mem_addr.get_data() } else { Vec::new() }; @@ -178,7 +178,7 @@ pub fn restore>( // 3. Restore memory state into module_inst // Assuming only one memory instance for now if let Some(mem_addr) = module_inst.mem_addrs.get(0) { - mem_addr.set_data(state.memory_data)?; + mem_addr.set_data(state.memory_data); println!("Memory state restored into module instance."); } else if !state.memory_data.is_empty() { eprintln!("Warning: Checkpoint contains memory data, but module has no memory instance."); diff --git a/src/execution/module.rs b/src/execution/module.rs index a946771..6826a00 100644 --- a/src/execution/module.rs +++ b/src/execution/module.rs @@ -225,7 +225,7 @@ impl ModuleInst { .replace(func.clone(), Rc::downgrade(&arc_module_inst)); } if let Some(start) = &module.start { - arc_module_inst.func_addrs.get_by_idx(start.func.clone()); + arc_module_inst.func_addrs.get_by_idx(start.func); } Ok(arc_module_inst) } @@ -253,7 +253,7 @@ impl ModuleInst { &[Instr::F64Const(i)] => Some(Val::Num(Num::F64(i as f64))), &[Instr::V128Const(i)] => Some(Val::Vec_(Vec_::V128(i))), &[Instr::RefNull(_)] => Some(Val::Ref(Ref::RefNull)), - [Instr::GlobalGet(i)] => Some(self.global_addrs.get_by_idx(i.clone()).get()), + [Instr::GlobalGet(i)] => Some(self.global_addrs.get_by_idx(*i).get()), _ => None, } } diff --git a/src/execution/regs.rs b/src/execution/regs.rs index 
cc50d8a..1055aa2 100644 --- a/src/execution/regs.rs +++ b/src/execution/regs.rs @@ -464,7 +464,7 @@ impl RegAllocator { /// Push a value onto the stack (allocate a new register) pub fn push(&mut self, vtype: ValueType) -> Reg { - self.type_stack.push(vtype.clone()); + self.type_stack.push(vtype); match vtype { ValueType::NumType(NumType::I32) => { let reg = Reg::I32(self.i32_depth as u16); @@ -604,7 +604,7 @@ impl RegAllocator { /// Pop the top value from the stack (using type_stack to determine the type) pub fn pop_any(&mut self) -> Option { - let vtype = self.type_stack.last()?.clone(); + let vtype = *self.type_stack.last()?; Some(self.pop(&vtype)) } diff --git a/src/execution/runtime.rs b/src/execution/runtime.rs index 5e4a67b..7a08b91 100644 --- a/src/execution/runtime.rs +++ b/src/execution/runtime.rs @@ -12,6 +12,7 @@ use crate::execution::vm::{Frame, FrameStack, Label, LabelStack, ModuleLevelInst use crate::structure::module::WasiFuncType; use crate::structure::types::{NumType, ValueType, VecType}; use crate::wasi::{WasiError, WasiResult}; +use arrayvec::ArrayVec; use std::path::Path; use std::rc::Rc; #[cfg(all(target_os = "wasi", target_env = "p1", target_feature = "atomics"))] @@ -210,7 +211,7 @@ impl Runtime { result_reg, }) => { // Call WASI function directly with params from registers - match self.call_wasi_function(&wasi_func_type, params) { + match self.call_wasi_function(&wasi_func_type, ¶ms) { Ok(result) => { if let Some(reg) = result_reg { if let Some(val) = result { @@ -264,7 +265,7 @@ impl Runtime { locals, module: func_module_weak.clone(), n: type_.results.len(), - result_reg: code.result_reg.clone(), + result_reg: code.result_reg, }, label_stack: vec![LabelStack { label: Label { @@ -280,8 +281,8 @@ impl Runtime { void: type_.results.is_empty(), instruction_count: 0, enable_checkpoint: self.enable_checkpoint, - result_regs: vec![], - return_result_regs: vec![], + result_regs: ArrayVec::new(), + return_result_regs: ArrayVec::new(), }; 
self.stacks.activation_frame_stack.push(new_frame); } @@ -309,20 +310,22 @@ impl Runtime { // Pop register file frame but keep reference for reading return values let finished_frame = self.stacks.activation_frame_stack.pop().unwrap(); let expected_n = finished_frame.frame.n; - let return_result_regs = finished_frame.return_result_regs.clone(); + let return_result_regs = finished_frame.return_result_regs; if self.stacks.activation_frame_stack.is_empty() { // Read values from registers before restoring - let values_to_pass: Vec = return_result_regs + // Use ArrayVec to avoid heap allocation (most functions return 0-2 values) + let values_to_pass: ArrayVec = return_result_regs .iter() .take(expected_n) .map(|reg| self.stacks.reg_file.get_val(reg)) .collect(); self.stacks.reg_file.restore_offsets(); - return Ok(values_to_pass); + return Ok(values_to_pass.into_iter().collect()); } else { // First read values from finished frame's registers (before restore) - let values_to_pass: Vec = return_result_regs + // Use ArrayVec to avoid heap allocation + let values_to_pass: ArrayVec = return_result_regs .iter() .map(|reg| self.stacks.reg_file.get_val(reg)) .collect(); @@ -355,7 +358,7 @@ impl Runtime { fn call_wasi_function( &self, func_type: &WasiFuncType, - params: Vec, + params: &[Val], ) -> WasiResult> { let wasi_impl = self .module_inst diff --git a/src/execution/table.rs b/src/execution/table.rs index fcd3eda..e162227 100644 --- a/src/execution/table.rs +++ b/src/execution/table.rs @@ -25,7 +25,7 @@ impl TableAddr { /// Creates a new table initialized with null references. 
pub fn new(type_: &TableType) -> TableAddr { TableAddr(Rc::new(RefCell::new(TableInst { - _type_: type_.clone(), + _type_: *type_, elem: { let min = type_.0.min as usize; let mut vec = Vec::with_capacity(min); diff --git a/src/execution/value.rs b/src/execution/value.rs index dcf54f4..afeab3b 100644 --- a/src/execution/value.rs +++ b/src/execution/value.rs @@ -61,7 +61,7 @@ impl Val { } /// Numeric value variants (i32, i64, f32, f64). -#[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] +#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq)] pub enum Num { I32(i32), I64(i64), diff --git a/src/execution/vm.rs b/src/execution/vm.rs index 83447b4..50e78c2 100644 --- a/src/execution/vm.rs +++ b/src/execution/vm.rs @@ -28,10 +28,15 @@ use crate::execution::{ use crate::structure::module::WasiFuncType; use crate::structure::types::LabelIdx as StructureLabelIdx; use crate::structure::{instructions::*, types::*}; +use arrayvec::ArrayVec; use lazy_static::lazy_static; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::rc::{Rc, Weak}; +/// Type alias for boxed register slice (ProcessedInstr use). +/// 16 bytes vs Vec's 24 bytes, no capacity overhead. +pub type RegSlice = Box<[Reg]>; + /// Value source for hybrid stack/register approach. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ValueSource { @@ -137,6 +142,14 @@ pub enum Operand { Optimized(OptimizedOperand), } +/// Destination that can be either a register or a local variable. +/// Used for instructions where dst folding is applied. 
+#[derive(Clone, Copy, Debug, Serialize, Deserialize)] +pub enum RegOrLocal { + Reg(u16), // Write to register + Local(u16), // Write to local variable +} + /// Register-based operand for I32 operations #[derive(Clone, Copy, Debug, Serialize, Deserialize)] pub enum I32RegOperand { @@ -219,53 +232,53 @@ pub enum ProcessedInstr { /// Register-based I32 instruction I32Reg { handler_index: usize, - dst: u16, // Destination register index + dst: I32RegOperand, // Destination: Reg(idx) or Param(local_idx) src1: I32RegOperand, // First operand src2: Option, // Second operand (None for unary ops) }, /// Register-based I64 instruction I64Reg { handler_index: usize, - dst: Reg, // Destination register (I64 for arithmetic, I32 for comparisons) + dst: I64RegOperand, // Destination (Reg for register, Param for local variable) src1: I64RegOperand, src2: Option, }, F32Reg { handler_index: usize, - dst: Reg, + dst: F32RegOperand, src1: F32RegOperand, src2: Option, }, F64Reg { handler_index: usize, - dst: Reg, + dst: F64RegOperand, src1: F64RegOperand, src2: Option, }, ConversionReg { handler_index: usize, - dst: Reg, // Destination register (type determined by output type) - src: Reg, // Source register (type determined by input type) + dst: RegOrLocal, // Destination register or local (type determined by output type) + src: Reg, // Source register (type determined by input type) }, MemoryLoadReg { handler_index: usize, - dst: Reg, // Destination register for loaded value - addr: Reg, // Address register (always I32) - offset: u64, // Memory offset + dst: RegOrLocal, // Destination (register or local for dst folding) + addr: I32RegOperand, // Address operand (can be folded) + offset: u64, // Memory offset }, MemoryStoreReg { handler_index: usize, - addr: Reg, // Address register (always I32) - value: Reg, // Value register to store - offset: u64, // Memory offset + addr: I32RegOperand, // Address operand (can be folded) + value: Reg, // Value register to store + offset: u64, // 
Memory offset }, MemoryOpsReg { handler_index: usize, dst: Option, // Destination register (for size/grow results) - args: Vec, // Argument registers (varies by operation) + args: RegSlice, // Argument registers (varies by operation) data_index: u32, // Data segment index (for memory.init) }, @@ -279,13 +292,13 @@ pub enum ProcessedInstr { GlobalGetReg { handler_index: usize, - dst: Reg, // Destination register + dst: RegOrLocal, // Destination register or local global_index: u32, // Global variable index }, GlobalSetReg { handler_index: usize, - src: Reg, // Source register + src: RegOrLocal, // Source register or local global_index: u32, // Global variable index }, @@ -309,23 +322,23 @@ pub enum ProcessedInstr { }, CallWasiReg { wasi_func_type: WasiFuncType, - param_regs: Vec, // Parameter registers + param_regs: RegSlice, // Parameter registers result_reg: Option, // Result register (most WASI functions return i32) }, CallIndirectReg { type_idx: TypeIdx, table_idx: TableIdx, index_reg: Reg, // Table index register - param_regs: Vec, // Parameter registers - result_regs: Vec, // Result registers + param_regs: RegSlice, // Parameter registers + result_regs: RegSlice, // Result registers }, CallReg { func_idx: FuncIdx, - param_regs: Vec, // Parameter registers - result_regs: Vec, // Result registers + param_regs: RegSlice, // Parameter registers + result_regs: RegSlice, // Result registers }, ReturnReg { - result_regs: Vec, // Result registers to return + result_regs: RegSlice, // Result registers to return }, /// Unconditional jump (for Else) JumpReg { target_ip: usize }, @@ -344,30 +357,30 @@ pub enum ProcessedInstr { }, /// End of block/loop/if EndReg { - source_regs: Vec, - target_result_regs: Vec, + source_regs: RegSlice, + target_result_regs: RegSlice, }, /// Unconditional branch BrReg { relative_depth: u32, target_ip: usize, // Target instruction pointer (set by fixup) - source_regs: Vec, - target_result_regs: Vec, + source_regs: RegSlice, + 
target_result_regs: RegSlice, }, /// Conditional branch BrIfReg { relative_depth: u32, target_ip: usize, // Target instruction pointer (set by fixup) cond_reg: Reg, - source_regs: Vec, - target_result_regs: Vec, + source_regs: RegSlice, + target_result_regs: RegSlice, }, /// Branch table BrTableReg { - targets: Vec<(u32, usize, Vec)>, // (relative_depth, target_ip, target_result_regs) for each target - default_target: (u32, usize, Vec), // (relative_depth, target_ip, target_result_regs) for default + targets: Vec<(u32, usize, RegSlice)>, // (relative_depth, target_ip, target_result_regs) for each target + default_target: (u32, usize, RegSlice), // (relative_depth, target_ip, target_result_regs) for default index_reg: Reg, // Index register - source_regs: Vec, // Source registers (same for all targets) + source_regs: RegSlice, // Source registers (same for all targets) }, /// No operation NopReg, @@ -426,8 +439,8 @@ pub enum ModuleLevelInstr { /// Invoke WebAssembly function with register-based parameters. 
InvokeReg { func_addr: FuncAddr, - params: Vec, // Parameters read from registers - result_regs: Vec, // Registers to write results to + params: Vec, // Parameters read from registers + result_regs: ArrayVec, // Registers to write results to (max 8) }, } @@ -767,8 +780,8 @@ impl VMState { void: type_.results.is_empty(), instruction_count: 0, enable_checkpoint: false, - result_regs: vec![], - return_result_regs: vec![], + result_regs: ArrayVec::new(), + return_result_regs: ArrayVec::new(), }; Ok(VMState { @@ -814,10 +827,10 @@ pub struct FrameStack { pub instruction_count: u64, #[serde(skip)] pub enable_checkpoint: bool, - /// Registers where caller expects results to be written - pub result_regs: Vec, - /// Registers containing this function's return values (set on return) - pub return_result_regs: Vec, + /// Registers where caller expects results to be written (max 8 for multi-value) + pub result_regs: ArrayVec, + /// Registers containing this function's return values (set on return, max 8) + pub return_result_regs: ArrayVec, } impl FrameStack { @@ -930,19 +943,23 @@ impl FrameStack { // Trace instruction execution (compile-time feature gate) #[cfg(feature = "trace")] if let Some(ref mut t) = tracer { - let global_addrs = self - .frame - .module - .upgrade() - .map(|m| m.global_addrs.clone()) - .unwrap_or_default(); - t.trace_instruction( - ip, - instruction_ref.handler_index(), - reg_file, - &self.frame.locals, - &global_addrs, - ); + if let Some(m) = self.frame.module.upgrade() { + t.trace_instruction( + ip, + instruction_ref.handler_index(), + reg_file, + &self.frame.locals, + &m.global_addrs, + ); + } else { + t.trace_instruction( + ip, + instruction_ref.handler_index(), + reg_file, + &self.frame.locals, + &[], + ); + } } // Match on instruction type @@ -957,12 +974,13 @@ impl FrameStack { let ctx = I32RegContext { reg_file: reg_file.get_i32_regs(), locals: &mut self.frame.locals, - src1: src1.clone(), - src2: src2.clone(), + dst: *dst, + src1: *src1, + src2: 
*src2, }; let handler = I32_REG_HANDLER_TABLE[*handler_index]; - handler(ctx, *dst)?; + handler(ctx)?; self.label_stack[current_label_stack_idx].ip = ip + 1; continue; @@ -979,9 +997,9 @@ impl FrameStack { i64_regs, i32_regs, locals: &mut self.frame.locals, - src1: src1.clone(), - src2: src2.clone(), - dst: dst.clone(), + dst: *dst, + src1: *src1, + src2: *src2, }; let handler = I64_REG_HANDLER_TABLE[*handler_index]; @@ -1002,9 +1020,9 @@ impl FrameStack { f32_regs, i32_regs, locals: &mut self.frame.locals, - src1: src1.clone(), - src2: src2.clone(), - dst: dst.clone(), + src1: *src1, + src2: *src2, + dst: *dst, }; let handler = F32_REG_HANDLER_TABLE[*handler_index]; @@ -1025,9 +1043,9 @@ impl FrameStack { f64_regs, i32_regs, locals: &mut self.frame.locals, - src1: src1.clone(), - src2: src2.clone(), - dst: dst.clone(), + src1: *src1, + src2: *src2, + dst: *dst, }; let handler = F64_REG_HANDLER_TABLE[*handler_index]; @@ -1043,8 +1061,9 @@ impl FrameStack { } => { let ctx = ConversionRegContext { reg_file, - src: src.clone(), - dst: dst.clone(), + locals: &mut self.frame.locals, + src: *src, + dst: *dst, }; let handler = CONVERSION_REG_HANDLER_TABLE[*handler_index]; @@ -1071,9 +1090,10 @@ impl FrameStack { let ctx = MemoryLoadRegContext { reg_file, + locals: &mut self.frame.locals, mem_addr, - addr: addr.clone(), - dst: dst.clone(), + addr: *addr, + dst: *dst, offset: *offset, }; @@ -1101,9 +1121,10 @@ impl FrameStack { let ctx = MemoryStoreRegContext { reg_file, + locals: &self.frame.locals, mem_addr, - addr: addr.clone(), - value: value.clone(), + addr: *addr, + value: *value, offset: *offset, }; @@ -1133,8 +1154,8 @@ impl FrameStack { reg_file, mem_addr, module_inst: &module_inst, - dst: dst.clone(), - args: args.clone(), + dst: *dst, + args, data_index: *data_index, }; @@ -1153,10 +1174,10 @@ impl FrameStack { } => { let ctx = SelectRegContext { reg_file, - dst: dst.clone(), - val1: val1.clone(), - val2: val2.clone(), - cond: cond.clone(), + dst: *dst, + val1: 
*val1, + val2: *val2, + cond: *cond, }; let handler = SELECT_REG_HANDLER_TABLE[*handler_index]; @@ -1183,16 +1204,40 @@ impl FrameStack { match *handler_index { HANDLER_IDX_GLOBAL_GET_I32 => { - reg_file.set_i32(dst.index(), val.to_i32().unwrap()); + let v = val.to_i32().unwrap(); + match dst { + RegOrLocal::Reg(idx) => reg_file.set_i32(*idx, v), + RegOrLocal::Local(idx) => { + self.frame.locals[*idx as usize] = Val::Num(Num::I32(v)) + } + } } HANDLER_IDX_GLOBAL_GET_I64 => { - reg_file.set_i64(dst.index(), val.to_i64().unwrap()); + let v = val.to_i64().unwrap(); + match dst { + RegOrLocal::Reg(idx) => reg_file.set_i64(*idx, v), + RegOrLocal::Local(idx) => { + self.frame.locals[*idx as usize] = Val::Num(Num::I64(v)) + } + } } HANDLER_IDX_GLOBAL_GET_F32 => { - reg_file.set_f32(dst.index(), val.to_f32().unwrap()); + let v = val.to_f32().unwrap(); + match dst { + RegOrLocal::Reg(idx) => reg_file.set_f32(*idx, v), + RegOrLocal::Local(idx) => { + self.frame.locals[*idx as usize] = Val::Num(Num::F32(v)) + } + } } HANDLER_IDX_GLOBAL_GET_F64 => { - reg_file.set_f64(dst.index(), val.to_f64().unwrap()); + let v = val.to_f64().unwrap(); + match dst { + RegOrLocal::Reg(idx) => reg_file.set_f64(*idx, v), + RegOrLocal::Local(idx) => { + self.frame.locals[*idx as usize] = Val::Num(Num::F64(v)) + } + } } _ => return Err(RuntimeError::InvalidHandlerIndex), } @@ -1207,16 +1252,40 @@ impl FrameStack { } => { let val = match *handler_index { HANDLER_IDX_GLOBAL_SET_I32 => { - Val::Num(Num::I32(reg_file.get_i32(src.index()))) + let v = match src { + RegOrLocal::Reg(idx) => reg_file.get_i32(*idx), + RegOrLocal::Local(idx) => { + self.frame.locals[*idx as usize].to_i32().unwrap() + } + }; + Val::Num(Num::I32(v)) } HANDLER_IDX_GLOBAL_SET_I64 => { - Val::Num(Num::I64(reg_file.get_i64(src.index()))) + let v = match src { + RegOrLocal::Reg(idx) => reg_file.get_i64(*idx), + RegOrLocal::Local(idx) => { + self.frame.locals[*idx as usize].to_i64().unwrap() + } + }; + Val::Num(Num::I64(v)) } 
HANDLER_IDX_GLOBAL_SET_F32 => { - Val::Num(Num::F32(reg_file.get_f32(src.index()))) + let v = match src { + RegOrLocal::Reg(idx) => reg_file.get_f32(*idx), + RegOrLocal::Local(idx) => { + self.frame.locals[*idx as usize].to_f32().unwrap() + } + }; + Val::Num(Num::F32(v)) } HANDLER_IDX_GLOBAL_SET_F64 => { - Val::Num(Num::F64(reg_file.get_f64(src.index()))) + let v = match src { + RegOrLocal::Reg(idx) => reg_file.get_f64(*idx), + RegOrLocal::Local(idx) => { + self.frame.locals[*idx as usize].to_f64().unwrap() + } + }; + Val::Num(Num::F64(v)) } _ => return Err(RuntimeError::InvalidHandlerIndex), }; @@ -1303,7 +1372,8 @@ impl FrameStack { result_reg, } => { let wasi_func_type_copy = *wasi_func_type; - let param_regs_copy = param_regs.clone(); + // Copy to stack instead of heap clone + let param_regs_copy: ArrayVec = param_regs.iter().copied().collect(); let result_reg_copy = *result_reg; // Read parameters from registers @@ -1329,8 +1399,9 @@ impl FrameStack { let type_idx = *type_idx; let table_idx = *table_idx; let index_reg = *index_reg; - let param_regs = param_regs.clone(); - let result_regs = result_regs.clone(); + // Copy to stack instead of heap clone + let param_regs: ArrayVec = param_regs.iter().copied().collect(); + let result_regs: ArrayVec = result_regs.iter().copied().collect(); // Read table index from register let i = reg_file.get_i32(index_reg.index()); @@ -1351,7 +1422,7 @@ impl FrameStack { let actual_type = func_addr.func_type(); let expected_type = &module_inst.types[type_idx.0 as usize]; - if actual_type != *expected_type { + if *actual_type != *expected_type { return Err(RuntimeError::IndirectCallTypeMismatch); } @@ -1376,7 +1447,7 @@ impl FrameStack { return Ok(Ok(Some(ModuleLevelInstr::InvokeReg { func_addr, params, - result_regs, + result_regs: result_regs.iter().copied().collect(), }))); } else { return Err(RuntimeError::UninitializedElement); @@ -1388,8 +1459,9 @@ impl FrameStack { result_regs, } => { let func_idx = *func_idx; - let 
param_regs = param_regs.clone(); - let result_regs = result_regs.clone(); + // Copy to stack instead of heap clone + let param_regs: ArrayVec = param_regs.iter().copied().collect(); + let result_regs: ArrayVec = result_regs.iter().copied().collect(); let module_inst = self .frame @@ -1425,12 +1497,12 @@ impl FrameStack { return Ok(Ok(Some(ModuleLevelInstr::InvokeReg { func_addr, params, - result_regs, + result_regs: result_regs.iter().copied().collect(), }))); } ProcessedInstr::ReturnReg { result_regs } => { // Store result registers for caller to read - self.return_result_regs = result_regs.clone(); + self.return_result_regs = result_regs.iter().copied().collect(); return Ok(Ok(Some(ModuleLevelInstr::Return))); } @@ -1528,8 +1600,10 @@ impl FrameStack { source_regs, target_result_regs, } => { - let source_regs = source_regs.clone(); - let target_result_regs = target_result_regs.clone(); + // Copy to stack instead of heap clone + let source_regs: ArrayVec = source_regs.iter().copied().collect(); + let target_result_regs: ArrayVec = + target_result_regs.iter().copied().collect(); // Pop label stack if self.label_stack.len() > 1 { @@ -1544,12 +1618,12 @@ impl FrameStack { let parent_processed_code = &self.label_stack[current_label_stack_idx].processed_instrs; if ip + 1 >= parent_processed_code.len() && current_label_stack_idx == 0 { - self.return_result_regs = source_regs.clone(); + self.return_result_regs = source_regs.iter().copied().collect(); break; } } else { // Function level end: store result registers for return - self.return_result_regs = source_regs.clone(); + self.return_result_regs = source_regs.iter().copied().collect(); break; } continue; @@ -1562,8 +1636,10 @@ impl FrameStack { } => { let relative_depth = *relative_depth as usize; let target_ip = *target_ip; - let source_regs = source_regs.clone(); - let target_result_regs = target_result_regs.clone(); + // Copy to stack instead of heap clone + let source_regs: ArrayVec = 
source_regs.iter().copied().collect(); + let target_result_regs: ArrayVec = + target_result_regs.iter().copied().collect(); if !source_regs.is_empty() && !target_result_regs.is_empty() { reg_file.copy_regs(&source_regs, &target_result_regs); @@ -1597,8 +1673,10 @@ impl FrameStack { let relative_depth = *relative_depth as usize; let target_ip = *target_ip; let cond_reg = *cond_reg; - let source_regs = source_regs.clone(); - let target_result_regs = target_result_regs.clone(); + // Copy to stack instead of heap clone + let source_regs: ArrayVec = source_regs.iter().copied().collect(); + let target_result_regs: ArrayVec = + target_result_regs.iter().copied().collect(); // Read condition from register let cond = reg_file.get_i32(cond_reg.index()); @@ -1632,21 +1710,25 @@ impl FrameStack { index_reg, source_regs, } => { - let targets = targets.clone(); - let default_target = default_target.clone(); let index_reg = *index_reg; - let source_regs = source_regs.clone(); + // Copy source_regs to stack instead of heap clone + let source_regs: ArrayVec = source_regs.iter().copied().collect(); // Read index from register let idx = reg_file.get_i32(index_reg.index()) as usize; - // Select target (relative_depth, target_ip, target_result_regs) tuple - let (relative_depth, target_ip, target_result_regs) = if idx < targets.len() { - targets[idx].clone() + // Select target and extract values without cloning entire Vec + let (relative_depth, target_ip, target_result_regs): ( + usize, + usize, + ArrayVec, + ) = if idx < targets.len() { + let (depth, ip, regs) = &targets[idx]; + (*depth as usize, *ip, regs.iter().copied().collect()) } else { - default_target + let (depth, ip, regs) = default_target; + (*depth as usize, *ip, regs.iter().copied().collect()) }; - let relative_depth = relative_depth as usize; // Copy source to target result registers if !source_regs.is_empty() && !target_result_regs.is_empty() { @@ -1761,6 +1843,7 @@ pub enum AdminInstr { struct I32RegContext<'a> { 
reg_file: &'a mut [i32], locals: &'a mut [Val], + dst: I32RegOperand, src1: I32RegOperand, src2: Option, } @@ -1774,259 +1857,268 @@ impl<'a> I32RegContext<'a> { I32RegOperand::Param(idx) => self.locals[*idx as usize].to_i32(), } } + + #[inline] + fn set_dst(&mut self, val: i32) { + match &self.dst { + I32RegOperand::Reg(idx) => self.reg_file[*idx as usize] = val, + I32RegOperand::Param(idx) => self.locals[*idx as usize] = Val::Num(Num::I32(val)), + I32RegOperand::Const(_) => unreachable!("Cannot write to Const dst"), + } + } } -fn i32_reg_local_get(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_local_get(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.reg_file[dst as usize] = val; + ctx.set_dst(val); Ok(()) } -fn i32_reg_local_set(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { - // dst is the local variable index, src1 is the value source +fn i32_reg_local_set(mut ctx: I32RegContext) -> Result<(), RuntimeError> { + // dst is the local variable (Param), src1 is the value source let val = ctx.get_operand(&ctx.src1)?; - ctx.locals[dst as usize] = Val::Num(Num::I32(val)); + ctx.set_dst(val); Ok(()) } -fn i32_reg_const(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_const(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.reg_file[dst as usize] = val; + ctx.set_dst(val); Ok(()) } -fn i32_reg_add(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_add(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = lhs.wrapping_add(rhs); + ctx.set_dst(lhs.wrapping_add(rhs)); Ok(()) } -fn i32_reg_sub(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_sub(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = 
ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = lhs.wrapping_sub(rhs); + ctx.set_dst(lhs.wrapping_sub(rhs)); Ok(()) } -fn i32_reg_mul(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_mul(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = lhs.wrapping_mul(rhs); + ctx.set_dst(lhs.wrapping_mul(rhs)); Ok(()) } -fn i32_reg_div_s(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_div_s(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; if rhs == 0 { return Err(RuntimeError::ZeroDivideError); } - ctx.reg_file[dst as usize] = lhs.wrapping_div(rhs); + ctx.set_dst(lhs.wrapping_div(rhs)); Ok(()) } -fn i32_reg_div_u(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_div_u(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; let rhs_u = rhs as u32; if rhs_u == 0 { return Err(RuntimeError::ZeroDivideError); } - ctx.reg_file[dst as usize] = ((lhs as u32) / rhs_u) as i32; + ctx.set_dst(((lhs as u32) / rhs_u) as i32); Ok(()) } -fn i32_reg_rem_s(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_rem_s(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; if rhs == 0 { return Err(RuntimeError::ZeroDivideError); } - ctx.reg_file[dst as usize] = lhs.wrapping_rem(rhs); + ctx.set_dst(lhs.wrapping_rem(rhs)); Ok(()) } -fn i32_reg_rem_u(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_rem_u(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; 
let rhs_u = rhs as u32; if rhs_u == 0 { return Err(RuntimeError::ZeroDivideError); } - ctx.reg_file[dst as usize] = ((lhs as u32) % rhs_u) as i32; + ctx.set_dst(((lhs as u32) % rhs_u) as i32); Ok(()) } -fn i32_reg_and(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_and(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = lhs & rhs; + ctx.set_dst(lhs & rhs); Ok(()) } -fn i32_reg_or(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_or(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = lhs | rhs; + ctx.set_dst(lhs | rhs); Ok(()) } -fn i32_reg_xor(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_xor(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = lhs ^ rhs; + ctx.set_dst(lhs ^ rhs); Ok(()) } -fn i32_reg_shl(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_shl(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = lhs.wrapping_shl(rhs as u32); + ctx.set_dst(lhs.wrapping_shl(rhs as u32)); Ok(()) } -fn i32_reg_shr_s(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_shr_s(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = lhs.wrapping_shr(rhs as u32); + ctx.set_dst(lhs.wrapping_shr(rhs as u32)); Ok(()) } -fn i32_reg_shr_u(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_shr_u(mut ctx: I32RegContext) -> 
Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = ((lhs as u32).wrapping_shr(rhs as u32)) as i32; + ctx.set_dst(((lhs as u32).wrapping_shr(rhs as u32)) as i32); Ok(()) } -fn i32_reg_rotl(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_rotl(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = lhs.rotate_left(rhs as u32); + ctx.set_dst(lhs.rotate_left(rhs as u32)); Ok(()) } -fn i32_reg_rotr(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_rotr(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = lhs.rotate_right(rhs as u32); + ctx.set_dst(lhs.rotate_right(rhs as u32)); Ok(()) } // Comparison handlers -fn i32_reg_eq(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_eq(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = if lhs == rhs { 1 } else { 0 }; + ctx.set_dst(if lhs == rhs { 1 } else { 0 }); Ok(()) } -fn i32_reg_ne(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_ne(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = if lhs != rhs { 1 } else { 0 }; + ctx.set_dst(if lhs != rhs { 1 } else { 0 }); Ok(()) } -fn i32_reg_lt_s(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_lt_s(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as 
usize] = if lhs < rhs { 1 } else { 0 }; + ctx.set_dst(if lhs < rhs { 1 } else { 0 }); Ok(()) } -fn i32_reg_lt_u(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_lt_u(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = if (lhs as u32) < (rhs as u32) { 1 } else { 0 }; + ctx.set_dst(if (lhs as u32) < (rhs as u32) { 1 } else { 0 }); Ok(()) } -fn i32_reg_le_s(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_le_s(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = if lhs <= rhs { 1 } else { 0 }; + ctx.set_dst(if lhs <= rhs { 1 } else { 0 }); Ok(()) } -fn i32_reg_le_u(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_le_u(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = if (lhs as u32) <= (rhs as u32) { 1 } else { 0 }; + ctx.set_dst(if (lhs as u32) <= (rhs as u32) { 1 } else { 0 }); Ok(()) } -fn i32_reg_gt_s(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_gt_s(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = if lhs > rhs { 1 } else { 0 }; + ctx.set_dst(if lhs > rhs { 1 } else { 0 }); Ok(()) } -fn i32_reg_gt_u(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_gt_u(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = if (lhs as u32) > (rhs as u32) { 1 } else { 0 }; + ctx.set_dst(if (lhs as u32) > (rhs as u32) { 1 } else { 0 
}); Ok(()) } -fn i32_reg_ge_s(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_ge_s(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = if lhs >= rhs { 1 } else { 0 }; + ctx.set_dst(if lhs >= rhs { 1 } else { 0 }); Ok(()) } -fn i32_reg_ge_u(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_ge_u(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.reg_file[dst as usize] = if (lhs as u32) >= (rhs as u32) { 1 } else { 0 }; + ctx.set_dst(if (lhs as u32) >= (rhs as u32) { 1 } else { 0 }); Ok(()) } // Unary operation handlers -fn i32_reg_clz(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_clz(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.reg_file[dst as usize] = val.leading_zeros() as i32; + ctx.set_dst(val.leading_zeros() as i32); Ok(()) } -fn i32_reg_ctz(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_ctz(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.reg_file[dst as usize] = val.trailing_zeros() as i32; + ctx.set_dst(val.trailing_zeros() as i32); Ok(()) } -fn i32_reg_popcnt(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_popcnt(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.reg_file[dst as usize] = val.count_ones() as i32; + ctx.set_dst(val.count_ones() as i32); Ok(()) } -fn i32_reg_eqz(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_eqz(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.reg_file[dst as usize] = if val == 0 { 1 } else { 0 }; + ctx.set_dst(if val == 0 { 1 } else { 0 }); Ok(()) } -fn 
i32_reg_extend8_s(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_extend8_s(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.reg_file[dst as usize] = (val as i8) as i32; + ctx.set_dst((val as i8) as i32); Ok(()) } -fn i32_reg_extend16_s(ctx: I32RegContext, dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_extend16_s(mut ctx: I32RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.reg_file[dst as usize] = (val as i16) as i32; + ctx.set_dst((val as i16) as i32); Ok(()) } // Handler function type -type I32RegHandler = fn(I32RegContext, u16) -> Result<(), RuntimeError>; +type I32RegHandler = fn(I32RegContext) -> Result<(), RuntimeError>; // Default error handler -fn i32_reg_invalid_handler(_ctx: I32RegContext, _dst: u16) -> Result<(), RuntimeError> { +fn i32_reg_invalid_handler(_ctx: I32RegContext) -> Result<(), RuntimeError> { Err(RuntimeError::InvalidHandlerIndex) } @@ -2085,9 +2177,9 @@ struct I64RegContext<'a> { i64_regs: &'a mut [i64], i32_regs: &'a mut [i32], // For comparison operations that return i32 locals: &'a mut [Val], + dst: I64RegOperand, src1: I64RegOperand, src2: Option, - dst: Reg, // Destination register (type determines which array to write to) } impl<'a> I64RegContext<'a> { @@ -2099,49 +2191,69 @@ impl<'a> I64RegContext<'a> { I64RegOperand::Param(idx) => self.locals[*idx as usize].to_i64(), } } + + #[inline] + fn set_dst(&mut self, val: i64) { + match &self.dst { + I64RegOperand::Reg(idx) => self.i64_regs[*idx as usize] = val, + I64RegOperand::Param(idx) => self.locals[*idx as usize] = Val::Num(Num::I64(val)), + I64RegOperand::Const(_) => unreachable!("Cannot write to Const dst"), + } + } + + /// For comparison operations that return i32 + /// dst must be I64RegOperand::Reg (index into i32_regs) + #[inline] + fn set_dst_i32(&mut self, val: i32) { + match &self.dst { + I64RegOperand::Reg(idx) => self.i32_regs[*idx as usize] = val, + 
I64RegOperand::Param(idx) => self.locals[*idx as usize] = Val::Num(Num::I32(val)), + I64RegOperand::Const(_) => unreachable!("Cannot write to Const dst"), + } + } } -fn i64_reg_local_get(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_local_get(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.i64_regs[ctx.dst.index() as usize] = val; + ctx.set_dst(val); Ok(()) } -fn i64_reg_local_set(ctx: I64RegContext) -> Result<(), RuntimeError> { - // dst.index() is the local variable index, src1 is the value source +fn i64_reg_local_set(mut ctx: I64RegContext) -> Result<(), RuntimeError> { + // dst is Param(local_idx), src1 is the value source let val = ctx.get_operand(&ctx.src1)?; - ctx.locals[ctx.dst.index() as usize] = Val::Num(Num::I64(val)); + ctx.set_dst(val); Ok(()) } -fn i64_reg_const(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_const(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.i64_regs[ctx.dst.index() as usize] = val; + ctx.set_dst(val); Ok(()) } -fn i64_reg_add(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_add(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i64_regs[ctx.dst.index() as usize] = lhs.wrapping_add(rhs); + ctx.set_dst(lhs.wrapping_add(rhs)); Ok(()) } -fn i64_reg_sub(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_sub(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i64_regs[ctx.dst.index() as usize] = lhs.wrapping_sub(rhs); + ctx.set_dst(lhs.wrapping_sub(rhs)); Ok(()) } -fn i64_reg_mul(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_mul(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = 
ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i64_regs[ctx.dst.index() as usize] = lhs.wrapping_mul(rhs); + ctx.set_dst(lhs.wrapping_mul(rhs)); Ok(()) } -fn i64_reg_div_s(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_div_s(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; if rhs == 0 { @@ -2150,209 +2262,208 @@ fn i64_reg_div_s(ctx: I64RegContext) -> Result<(), RuntimeError> { if lhs == i64::MIN && rhs == -1 { return Err(RuntimeError::IntegerOverflow); } - ctx.i64_regs[ctx.dst.index() as usize] = lhs.wrapping_div(rhs); + ctx.set_dst(lhs.wrapping_div(rhs)); Ok(()) } -fn i64_reg_div_u(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_div_u(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; let rhs_u = rhs as u64; if rhs_u == 0 { return Err(RuntimeError::ZeroDivideError); } - ctx.i64_regs[ctx.dst.index() as usize] = ((lhs as u64) / rhs_u) as i64; + ctx.set_dst(((lhs as u64) / rhs_u) as i64); Ok(()) } -fn i64_reg_rem_s(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_rem_s(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; if rhs == 0 { return Err(RuntimeError::ZeroDivideError); } - ctx.i64_regs[ctx.dst.index() as usize] = lhs.wrapping_rem(rhs); + ctx.set_dst(lhs.wrapping_rem(rhs)); Ok(()) } -fn i64_reg_rem_u(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_rem_u(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; let rhs_u = rhs as u64; if rhs_u == 0 { return Err(RuntimeError::ZeroDivideError); } - ctx.i64_regs[ctx.dst.index() as usize] = ((lhs as u64) % rhs_u) as i64; + ctx.set_dst(((lhs as u64) % rhs_u) as i64); 
Ok(()) } -fn i64_reg_and(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_and(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i64_regs[ctx.dst.index() as usize] = lhs & rhs; + ctx.set_dst(lhs & rhs); Ok(()) } -fn i64_reg_or(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_or(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i64_regs[ctx.dst.index() as usize] = lhs | rhs; + ctx.set_dst(lhs | rhs); Ok(()) } -fn i64_reg_xor(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_xor(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i64_regs[ctx.dst.index() as usize] = lhs ^ rhs; + ctx.set_dst(lhs ^ rhs); Ok(()) } -fn i64_reg_shl(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_shl(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i64_regs[ctx.dst.index() as usize] = lhs.wrapping_shl((rhs & 0x3f) as u32); + ctx.set_dst(lhs.wrapping_shl((rhs & 0x3f) as u32)); Ok(()) } -fn i64_reg_shr_s(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_shr_s(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i64_regs[ctx.dst.index() as usize] = lhs.wrapping_shr((rhs & 0x3f) as u32); + ctx.set_dst(lhs.wrapping_shr((rhs & 0x3f) as u32)); Ok(()) } -fn i64_reg_shr_u(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_shr_u(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i64_regs[ctx.dst.index() as 
usize] = - ((lhs as u64).wrapping_shr((rhs & 0x3f) as u32)) as i64; + ctx.set_dst(((lhs as u64).wrapping_shr((rhs & 0x3f) as u32)) as i64); Ok(()) } -fn i64_reg_rotl(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_rotl(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i64_regs[ctx.dst.index() as usize] = (lhs as u64).rotate_left((rhs & 0x3f) as u32) as i64; + ctx.set_dst((lhs as u64).rotate_left((rhs & 0x3f) as u32) as i64); Ok(()) } -fn i64_reg_rotr(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_rotr(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i64_regs[ctx.dst.index() as usize] = (lhs as u64).rotate_right((rhs & 0x3f) as u32) as i64; + ctx.set_dst((lhs as u64).rotate_right((rhs & 0x3f) as u32) as i64); Ok(()) } -fn i64_reg_clz(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_clz(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.i64_regs[ctx.dst.index() as usize] = val.leading_zeros() as i64; + ctx.set_dst(val.leading_zeros() as i64); Ok(()) } -fn i64_reg_ctz(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_ctz(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.i64_regs[ctx.dst.index() as usize] = val.trailing_zeros() as i64; + ctx.set_dst(val.trailing_zeros() as i64); Ok(()) } -fn i64_reg_popcnt(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_popcnt(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.i64_regs[ctx.dst.index() as usize] = val.count_ones() as i64; + ctx.set_dst(val.count_ones() as i64); Ok(()) } -fn i64_reg_extend8_s(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_extend8_s(mut ctx: I64RegContext) -> Result<(), 
RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.i64_regs[ctx.dst.index() as usize] = (val as i8) as i64; + ctx.set_dst((val as i8) as i64); Ok(()) } -fn i64_reg_extend16_s(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_extend16_s(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.i64_regs[ctx.dst.index() as usize] = (val as i16) as i64; + ctx.set_dst((val as i16) as i64); Ok(()) } -fn i64_reg_extend32_s(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_extend32_s(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.i64_regs[ctx.dst.index() as usize] = (val as i32) as i64; + ctx.set_dst((val as i32) as i64); Ok(()) } -// Comparison operations - write to i32_regs (ctx.dst is Reg::I32) -fn i64_reg_eq(ctx: I64RegContext) -> Result<(), RuntimeError> { +// Comparison operations - write to i32 (via set_dst_i32) +fn i64_reg_eq(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs == rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs == rhs { 1 } else { 0 }); Ok(()) } -fn i64_reg_ne(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_ne(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs != rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs != rhs { 1 } else { 0 }); Ok(()) } -fn i64_reg_lt_s(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_lt_s(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs < rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs < rhs { 1 } else { 0 }); Ok(()) } -fn 
i64_reg_lt_u(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_lt_u(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if (lhs as u64) < (rhs as u64) { 1 } else { 0 }; + ctx.set_dst_i32(if (lhs as u64) < (rhs as u64) { 1 } else { 0 }); Ok(()) } -fn i64_reg_le_s(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_le_s(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs <= rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs <= rhs { 1 } else { 0 }); Ok(()) } -fn i64_reg_le_u(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_le_u(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if (lhs as u64) <= (rhs as u64) { 1 } else { 0 }; + ctx.set_dst_i32(if (lhs as u64) <= (rhs as u64) { 1 } else { 0 }); Ok(()) } -fn i64_reg_gt_s(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_gt_s(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs > rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs > rhs { 1 } else { 0 }); Ok(()) } -fn i64_reg_gt_u(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_gt_u(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if (lhs as u64) > (rhs as u64) { 1 } else { 0 }; + ctx.set_dst_i32(if (lhs as u64) > (rhs as u64) { 1 } else { 0 }); Ok(()) } -fn i64_reg_ge_s(ctx: I64RegContext) -> Result<(), 
RuntimeError> { +fn i64_reg_ge_s(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs >= rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs >= rhs { 1 } else { 0 }); Ok(()) } -fn i64_reg_ge_u(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_ge_u(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if (lhs as u64) >= (rhs as u64) { 1 } else { 0 }; + ctx.set_dst_i32(if (lhs as u64) >= (rhs as u64) { 1 } else { 0 }); Ok(()) } -fn i64_reg_eqz(ctx: I64RegContext) -> Result<(), RuntimeError> { +fn i64_reg_eqz(mut ctx: I64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.i32_regs[ctx.dst.index() as usize] = if val == 0 { 1 } else { 0 }; + ctx.set_dst_i32(if val == 0 { 1 } else { 0 }); Ok(()) } @@ -2424,9 +2535,9 @@ struct F32RegContext<'a> { f32_regs: &'a mut [f32], i32_regs: &'a mut [i32], // For comparison operations that return i32 locals: &'a mut [Val], + dst: F32RegOperand, src1: F32RegOperand, src2: Option, - dst: Reg, // Destination register (type determines which array to write to) } impl<'a> F32RegContext<'a> { @@ -2438,59 +2549,77 @@ impl<'a> F32RegContext<'a> { F32RegOperand::Param(idx) => self.locals[*idx as usize].to_f32(), } } + + #[inline] + fn set_dst(&mut self, val: f32) { + match &self.dst { + F32RegOperand::Reg(idx) => self.f32_regs[*idx as usize] = val, + F32RegOperand::Param(idx) => self.locals[*idx as usize] = Val::Num(Num::F32(val)), + F32RegOperand::Const(_) => unreachable!("Cannot write to Const dst"), + } + } + + #[inline] + fn set_dst_i32(&mut self, val: i32) { + match &self.dst { + F32RegOperand::Reg(idx) => self.i32_regs[*idx as usize] = val, + F32RegOperand::Param(idx) => self.locals[*idx as usize] = 
Val::Num(Num::I32(val)), + F32RegOperand::Const(_) => unreachable!("Cannot write to Const dst"), + } + } } -fn f32_reg_local_get(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_local_get(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.f32_regs[ctx.dst.index() as usize] = val; + ctx.set_dst(val); Ok(()) } -fn f32_reg_local_set(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_local_set(mut ctx: F32RegContext) -> Result<(), RuntimeError> { // dst.index() is the local variable index, src1 is the value source let val = ctx.get_operand(&ctx.src1)?; - ctx.locals[ctx.dst.index() as usize] = Val::Num(Num::F32(val)); + ctx.set_dst(val); Ok(()) } -fn f32_reg_const(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_const(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.f32_regs[ctx.dst.index() as usize] = val; + ctx.set_dst(val); Ok(()) } -fn f32_reg_add(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_add(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.f32_regs[ctx.dst.index() as usize] = lhs + rhs; + ctx.set_dst(lhs + rhs); Ok(()) } -fn f32_reg_sub(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_sub(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.f32_regs[ctx.dst.index() as usize] = lhs - rhs; + ctx.set_dst(lhs - rhs); Ok(()) } -fn f32_reg_mul(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_mul(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.f32_regs[ctx.dst.index() as usize] = lhs * rhs; + ctx.set_dst(lhs * rhs); Ok(()) } -fn f32_reg_div(ctx: F32RegContext) -> Result<(), 
RuntimeError> { +fn f32_reg_div(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.f32_regs[ctx.dst.index() as usize] = lhs / rhs; + ctx.set_dst(lhs / rhs); Ok(()) } -fn f32_reg_min(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_min(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.f32_regs[ctx.dst.index() as usize] = if lhs.is_nan() || rhs.is_nan() { + let result = if lhs.is_nan() || rhs.is_nan() { f32::NAN } else if lhs == 0.0 && rhs == 0.0 { if lhs.is_sign_negative() || rhs.is_sign_negative() { @@ -2501,13 +2630,14 @@ fn f32_reg_min(ctx: F32RegContext) -> Result<(), RuntimeError> { } else { lhs.min(rhs) }; + ctx.set_dst(result); Ok(()) } -fn f32_reg_max(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_max(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.f32_regs[ctx.dst.index() as usize] = if lhs.is_nan() || rhs.is_nan() { + let result = if lhs.is_nan() || rhs.is_nan() { f32::NAN } else if lhs == 0.0 && rhs == 0.0 { if lhs.is_sign_positive() || rhs.is_sign_positive() { @@ -2518,99 +2648,100 @@ fn f32_reg_max(ctx: F32RegContext) -> Result<(), RuntimeError> { } else { lhs.max(rhs) }; + ctx.set_dst(result); Ok(()) } -fn f32_reg_copysign(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_copysign(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.f32_regs[ctx.dst.index() as usize] = lhs.copysign(rhs); + ctx.set_dst(lhs.copysign(rhs)); Ok(()) } // Unary operations -fn f32_reg_abs(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_abs(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let val = 
ctx.get_operand(&ctx.src1)?; - ctx.f32_regs[ctx.dst.index() as usize] = val.abs(); + ctx.set_dst(val.abs()); Ok(()) } -fn f32_reg_neg(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_neg(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.f32_regs[ctx.dst.index() as usize] = -val; + ctx.set_dst(-val); Ok(()) } -fn f32_reg_ceil(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_ceil(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.f32_regs[ctx.dst.index() as usize] = val.ceil(); + ctx.set_dst(val.ceil()); Ok(()) } -fn f32_reg_floor(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_floor(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.f32_regs[ctx.dst.index() as usize] = val.floor(); + ctx.set_dst(val.floor()); Ok(()) } -fn f32_reg_trunc(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_trunc(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.f32_regs[ctx.dst.index() as usize] = val.trunc(); + ctx.set_dst(val.trunc()); Ok(()) } -fn f32_reg_nearest(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_nearest(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.f32_regs[ctx.dst.index() as usize] = val.round_ties_even(); + ctx.set_dst(val.round_ties_even()); Ok(()) } -fn f32_reg_sqrt(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_sqrt(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.f32_regs[ctx.dst.index() as usize] = val.sqrt(); + ctx.set_dst(val.sqrt()); Ok(()) } // Comparison operations (return i32) -fn f32_reg_eq(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_eq(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = 
ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs == rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs == rhs { 1 } else { 0 }); Ok(()) } -fn f32_reg_ne(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_ne(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs != rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs != rhs { 1 } else { 0 }); Ok(()) } -fn f32_reg_lt(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_lt(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs < rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs < rhs { 1 } else { 0 }); Ok(()) } -fn f32_reg_gt(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_gt(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs > rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs > rhs { 1 } else { 0 }); Ok(()) } -fn f32_reg_le(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_le(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs <= rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs <= rhs { 1 } else { 0 }); Ok(()) } -fn f32_reg_ge(ctx: F32RegContext) -> Result<(), RuntimeError> { +fn f32_reg_ge(mut ctx: F32RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs >= rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs >= rhs { 1 } else { 0 }); 
Ok(()) } @@ -2664,9 +2795,9 @@ struct F64RegContext<'a> { f64_regs: &'a mut [f64], i32_regs: &'a mut [i32], // For comparison operations that return i32 locals: &'a mut [Val], + dst: F64RegOperand, src1: F64RegOperand, src2: Option, - dst: Reg, } impl<'a> F64RegContext<'a> { @@ -2678,59 +2809,77 @@ impl<'a> F64RegContext<'a> { F64RegOperand::Param(idx) => self.locals[*idx as usize].to_f64(), } } + + #[inline] + fn set_dst(&mut self, val: f64) { + match &self.dst { + F64RegOperand::Reg(idx) => self.f64_regs[*idx as usize] = val, + F64RegOperand::Param(idx) => self.locals[*idx as usize] = Val::Num(Num::F64(val)), + F64RegOperand::Const(_) => unreachable!("Cannot write to Const dst"), + } + } + + #[inline] + fn set_dst_i32(&mut self, val: i32) { + match &self.dst { + F64RegOperand::Reg(idx) => self.i32_regs[*idx as usize] = val, + F64RegOperand::Param(idx) => self.locals[*idx as usize] = Val::Num(Num::I32(val)), + F64RegOperand::Const(_) => unreachable!("Cannot write to Const dst"), + } + } } -fn f64_reg_local_get(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_local_get(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.f64_regs[ctx.dst.index() as usize] = val; + ctx.set_dst(val); Ok(()) } -fn f64_reg_local_set(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_local_set(mut ctx: F64RegContext) -> Result<(), RuntimeError> { // dst.index() is the local variable index, src1 is the value source let val = ctx.get_operand(&ctx.src1)?; - ctx.locals[ctx.dst.index() as usize] = Val::Num(Num::F64(val)); + ctx.set_dst(val); Ok(()) } -fn f64_reg_const(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_const(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.f64_regs[ctx.dst.index() as usize] = val; + ctx.set_dst(val); Ok(()) } -fn f64_reg_add(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_add(mut ctx: F64RegContext) -> Result<(), 
RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.f64_regs[ctx.dst.index() as usize] = lhs + rhs; + ctx.set_dst(lhs + rhs); Ok(()) } -fn f64_reg_sub(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_sub(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.f64_regs[ctx.dst.index() as usize] = lhs - rhs; + ctx.set_dst(lhs - rhs); Ok(()) } -fn f64_reg_mul(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_mul(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.f64_regs[ctx.dst.index() as usize] = lhs * rhs; + ctx.set_dst(lhs * rhs); Ok(()) } -fn f64_reg_div(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_div(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.f64_regs[ctx.dst.index() as usize] = lhs / rhs; + ctx.set_dst(lhs / rhs); Ok(()) } -fn f64_reg_min(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_min(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.f64_regs[ctx.dst.index() as usize] = if lhs.is_nan() || rhs.is_nan() { + let result = if lhs.is_nan() || rhs.is_nan() { f64::NAN } else if lhs == 0.0 && rhs == 0.0 { if lhs.is_sign_negative() || rhs.is_sign_negative() { @@ -2741,13 +2890,14 @@ fn f64_reg_min(ctx: F64RegContext) -> Result<(), RuntimeError> { } else { lhs.min(rhs) }; + ctx.set_dst(result); Ok(()) } -fn f64_reg_max(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_max(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; 
- ctx.f64_regs[ctx.dst.index() as usize] = if lhs.is_nan() || rhs.is_nan() { + let result = if lhs.is_nan() || rhs.is_nan() { f64::NAN } else if lhs == 0.0 && rhs == 0.0 { if lhs.is_sign_positive() || rhs.is_sign_positive() { @@ -2758,99 +2908,100 @@ fn f64_reg_max(ctx: F64RegContext) -> Result<(), RuntimeError> { } else { lhs.max(rhs) }; + ctx.set_dst(result); Ok(()) } -fn f64_reg_copysign(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_copysign(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.f64_regs[ctx.dst.index() as usize] = lhs.copysign(rhs); + ctx.set_dst(lhs.copysign(rhs)); Ok(()) } // Unary operations -fn f64_reg_abs(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_abs(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.f64_regs[ctx.dst.index() as usize] = val.abs(); + ctx.set_dst(val.abs()); Ok(()) } -fn f64_reg_neg(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_neg(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.f64_regs[ctx.dst.index() as usize] = -val; + ctx.set_dst(-val); Ok(()) } -fn f64_reg_ceil(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_ceil(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.f64_regs[ctx.dst.index() as usize] = val.ceil(); + ctx.set_dst(val.ceil()); Ok(()) } -fn f64_reg_floor(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_floor(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.f64_regs[ctx.dst.index() as usize] = val.floor(); + ctx.set_dst(val.floor()); Ok(()) } -fn f64_reg_trunc(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_trunc(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - 
ctx.f64_regs[ctx.dst.index() as usize] = val.trunc(); + ctx.set_dst(val.trunc()); Ok(()) } -fn f64_reg_nearest(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_nearest(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.f64_regs[ctx.dst.index() as usize] = val.round_ties_even(); + ctx.set_dst(val.round_ties_even()); Ok(()) } -fn f64_reg_sqrt(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_sqrt(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let val = ctx.get_operand(&ctx.src1)?; - ctx.f64_regs[ctx.dst.index() as usize] = val.sqrt(); + ctx.set_dst(val.sqrt()); Ok(()) } // Comparison operations (return i32) -fn f64_reg_eq(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_eq(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs == rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs == rhs { 1 } else { 0 }); Ok(()) } -fn f64_reg_ne(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_ne(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs != rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs != rhs { 1 } else { 0 }); Ok(()) } -fn f64_reg_lt(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_lt(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs < rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs < rhs { 1 } else { 0 }); Ok(()) } -fn f64_reg_gt(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_gt(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = 
ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs > rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs > rhs { 1 } else { 0 }); Ok(()) } -fn f64_reg_le(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_le(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs <= rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs <= rhs { 1 } else { 0 }); Ok(()) } -fn f64_reg_ge(ctx: F64RegContext) -> Result<(), RuntimeError> { +fn f64_reg_ge(mut ctx: F64RegContext) -> Result<(), RuntimeError> { let lhs = ctx.get_operand(&ctx.src1)?; let rhs = ctx.get_operand(&ctx.src2.as_ref().unwrap())?; - ctx.i32_regs[ctx.dst.index() as usize] = if lhs >= rhs { 1 } else { 0 }; + ctx.set_dst_i32(if lhs >= rhs { 1 } else { 0 }); Ok(()) } @@ -2896,8 +3047,43 @@ lazy_static! { // Conversion Reg handlers struct ConversionRegContext<'a> { reg_file: &'a mut RegFile, + locals: &'a mut [Val], src: Reg, - dst: Reg, + dst: RegOrLocal, +} + +impl<'a> ConversionRegContext<'a> { + #[inline] + fn set_dst_i32(&mut self, val: i32) { + match &self.dst { + RegOrLocal::Reg(idx) => self.reg_file.set_i32(*idx, val), + RegOrLocal::Local(idx) => self.locals[*idx as usize] = Val::Num(Num::I32(val)), + } + } + + #[inline] + fn set_dst_i64(&mut self, val: i64) { + match &self.dst { + RegOrLocal::Reg(idx) => self.reg_file.set_i64(*idx, val), + RegOrLocal::Local(idx) => self.locals[*idx as usize] = Val::Num(Num::I64(val)), + } + } + + #[inline] + fn set_dst_f32(&mut self, val: f32) { + match &self.dst { + RegOrLocal::Reg(idx) => self.reg_file.set_f32(*idx, val), + RegOrLocal::Local(idx) => self.locals[*idx as usize] = Val::Num(Num::F32(val)), + } + } + + #[inline] + fn set_dst_f64(&mut self, val: f64) { + match &self.dst { + RegOrLocal::Reg(idx) => self.reg_file.set_f64(*idx, val), + RegOrLocal::Local(idx) => self.locals[*idx as 
usize] = Val::Num(Num::F64(val)), + } + } } type ConversionRegHandler = fn(ConversionRegContext) -> Result<(), RuntimeError>; @@ -2907,27 +3093,27 @@ fn conversion_reg_invalid_handler(_ctx: ConversionRegContext) -> Result<(), Runt } // i32 -> i64 -fn conv_i64_extend_i32_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i64_extend_i32_s(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_i32(ctx.src.index()); - ctx.reg_file.set_i64(ctx.dst.index(), val as i64); + ctx.set_dst_i64(val as i64); Ok(()) } -fn conv_i64_extend_i32_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i64_extend_i32_u(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_i32(ctx.src.index()); - ctx.reg_file.set_i64(ctx.dst.index(), (val as u32) as i64); + ctx.set_dst_i64((val as u32) as i64); Ok(()) } // i64 -> i32 -fn conv_i32_wrap_i64(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i32_wrap_i64(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_i64(ctx.src.index()); - ctx.reg_file.set_i32(ctx.dst.index(), val as i32); + ctx.set_dst_i32(val as i32); Ok(()) } // f32 -> i32 -fn conv_i32_trunc_f32_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i32_trunc_f32_s(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f32(ctx.src.index()); if val.is_nan() { return Err(RuntimeError::InvalidConversionToInt); @@ -2936,11 +3122,11 @@ fn conv_i32_trunc_f32_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { if truncated < (i32::MIN as f32) || truncated > (i32::MAX as f32) { return Err(RuntimeError::IntegerOverflow); } - ctx.reg_file.set_i32(ctx.dst.index(), truncated as i32); + ctx.set_dst_i32(truncated as i32); Ok(()) } -fn conv_i32_trunc_f32_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i32_trunc_f32_u(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { 
let val = ctx.reg_file.get_f32(ctx.src.index()); if val.is_nan() { return Err(RuntimeError::InvalidConversionToInt); @@ -2949,13 +3135,12 @@ fn conv_i32_trunc_f32_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { if truncated < 0.0 || truncated > (u32::MAX as f32) { return Err(RuntimeError::IntegerOverflow); } - ctx.reg_file - .set_i32(ctx.dst.index(), (truncated as u32) as i32); + ctx.set_dst_i32((truncated as u32) as i32); Ok(()) } // f64 -> i32 -fn conv_i32_trunc_f64_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i32_trunc_f64_s(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f64(ctx.src.index()); if val.is_nan() { return Err(RuntimeError::InvalidConversionToInt); @@ -2964,11 +3149,11 @@ fn conv_i32_trunc_f64_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { if truncated < (i32::MIN as f64) || truncated > (i32::MAX as f64) { return Err(RuntimeError::IntegerOverflow); } - ctx.reg_file.set_i32(ctx.dst.index(), truncated as i32); + ctx.set_dst_i32(truncated as i32); Ok(()) } -fn conv_i32_trunc_f64_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i32_trunc_f64_u(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f64(ctx.src.index()); if val.is_nan() { return Err(RuntimeError::InvalidConversionToInt); @@ -2977,13 +3162,12 @@ fn conv_i32_trunc_f64_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { if truncated < 0.0 || truncated > (u32::MAX as f64) { return Err(RuntimeError::IntegerOverflow); } - ctx.reg_file - .set_i32(ctx.dst.index(), (truncated as u32) as i32); + ctx.set_dst_i32((truncated as u32) as i32); Ok(()) } // f32 -> i64 -fn conv_i64_trunc_f32_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i64_trunc_f32_s(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f32(ctx.src.index()); if val.is_nan() { return Err(RuntimeError::InvalidConversionToInt); @@ -2992,11 
+3176,11 @@ fn conv_i64_trunc_f32_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { if truncated < (i64::MIN as f32) || truncated >= (i64::MAX as f32) { return Err(RuntimeError::IntegerOverflow); } - ctx.reg_file.set_i64(ctx.dst.index(), truncated as i64); + ctx.set_dst_i64(truncated as i64); Ok(()) } -fn conv_i64_trunc_f32_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i64_trunc_f32_u(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f32(ctx.src.index()); if val.is_nan() { return Err(RuntimeError::InvalidConversionToInt); @@ -3005,13 +3189,12 @@ fn conv_i64_trunc_f32_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { if truncated < 0.0 || truncated >= (u64::MAX as f32) { return Err(RuntimeError::IntegerOverflow); } - ctx.reg_file - .set_i64(ctx.dst.index(), (truncated as u64) as i64); + ctx.set_dst_i64((truncated as u64) as i64); Ok(()) } // f64 -> i64 -fn conv_i64_trunc_f64_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i64_trunc_f64_s(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f64(ctx.src.index()); if val.is_nan() { return Err(RuntimeError::InvalidConversionToInt); @@ -3020,11 +3203,11 @@ fn conv_i64_trunc_f64_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { if truncated < (i64::MIN as f64) || truncated >= (i64::MAX as f64) { return Err(RuntimeError::IntegerOverflow); } - ctx.reg_file.set_i64(ctx.dst.index(), truncated as i64); + ctx.set_dst_i64(truncated as i64); Ok(()) } -fn conv_i64_trunc_f64_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i64_trunc_f64_u(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f64(ctx.src.index()); if val.is_nan() { return Err(RuntimeError::InvalidConversionToInt); @@ -3033,13 +3216,12 @@ fn conv_i64_trunc_f64_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { if truncated < 0.0 || truncated >= (u64::MAX as f64) { 
return Err(RuntimeError::IntegerOverflow); } - ctx.reg_file - .set_i64(ctx.dst.index(), (truncated as u64) as i64); + ctx.set_dst_i64((truncated as u64) as i64); Ok(()) } // Saturating truncations (i32) -fn conv_i32_trunc_sat_f32_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i32_trunc_sat_f32_s(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f32(ctx.src.index()); let result = if val.is_nan() { 0 @@ -3050,11 +3232,11 @@ fn conv_i32_trunc_sat_f32_s(ctx: ConversionRegContext) -> Result<(), RuntimeErro } else { val.trunc() as i32 }; - ctx.reg_file.set_i32(ctx.dst.index(), result); + ctx.set_dst_i32(result); Ok(()) } -fn conv_i32_trunc_sat_f32_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i32_trunc_sat_f32_u(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f32(ctx.src.index()); let result = if val.is_nan() || val <= 0.0 { 0 @@ -3063,11 +3245,11 @@ fn conv_i32_trunc_sat_f32_u(ctx: ConversionRegContext) -> Result<(), RuntimeErro } else { val.trunc() as u32 }; - ctx.reg_file.set_i32(ctx.dst.index(), result as i32); + ctx.set_dst_i32(result as i32); Ok(()) } -fn conv_i32_trunc_sat_f64_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i32_trunc_sat_f64_s(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f64(ctx.src.index()); let result = if val.is_nan() { 0 @@ -3078,11 +3260,11 @@ fn conv_i32_trunc_sat_f64_s(ctx: ConversionRegContext) -> Result<(), RuntimeErro } else { val.trunc() as i32 }; - ctx.reg_file.set_i32(ctx.dst.index(), result); + ctx.set_dst_i32(result); Ok(()) } -fn conv_i32_trunc_sat_f64_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i32_trunc_sat_f64_u(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f64(ctx.src.index()); let result = if val.is_nan() || val <= 0.0 { 0 @@ -3091,12 +3273,12 @@ fn conv_i32_trunc_sat_f64_u(ctx: 
ConversionRegContext) -> Result<(), RuntimeErro } else { val.trunc() as u32 }; - ctx.reg_file.set_i32(ctx.dst.index(), result as i32); + ctx.set_dst_i32(result as i32); Ok(()) } // Saturating truncations (i64) -fn conv_i64_trunc_sat_f32_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i64_trunc_sat_f32_s(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f32(ctx.src.index()); let result = if val.is_nan() { 0 @@ -3107,11 +3289,11 @@ fn conv_i64_trunc_sat_f32_s(ctx: ConversionRegContext) -> Result<(), RuntimeErro } else { val.trunc() as i64 }; - ctx.reg_file.set_i64(ctx.dst.index(), result); + ctx.set_dst_i64(result); Ok(()) } -fn conv_i64_trunc_sat_f32_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i64_trunc_sat_f32_u(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f32(ctx.src.index()); let result = if val.is_nan() || val <= 0.0 { 0 @@ -3120,11 +3302,11 @@ fn conv_i64_trunc_sat_f32_u(ctx: ConversionRegContext) -> Result<(), RuntimeErro } else { val.trunc() as u64 }; - ctx.reg_file.set_i64(ctx.dst.index(), result as i64); + ctx.set_dst_i64(result as i64); Ok(()) } -fn conv_i64_trunc_sat_f64_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i64_trunc_sat_f64_s(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f64(ctx.src.index()); let result = if val.is_nan() { 0 @@ -3135,11 +3317,11 @@ fn conv_i64_trunc_sat_f64_s(ctx: ConversionRegContext) -> Result<(), RuntimeErro } else { val.trunc() as i64 }; - ctx.reg_file.set_i64(ctx.dst.index(), result); + ctx.set_dst_i64(result); Ok(()) } -fn conv_i64_trunc_sat_f64_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i64_trunc_sat_f64_u(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f64(ctx.src.index()); let result = if val.is_nan() || val <= 0.0 { 0 @@ -3148,100 +3330,98 @@ fn 
conv_i64_trunc_sat_f64_u(ctx: ConversionRegContext) -> Result<(), RuntimeErro } else { val.trunc() as u64 }; - ctx.reg_file.set_i64(ctx.dst.index(), result as i64); + ctx.set_dst_i64(result as i64); Ok(()) } // i32 -> f32 -fn conv_f32_convert_i32_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_f32_convert_i32_s(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_i32(ctx.src.index()); - ctx.reg_file.set_f32(ctx.dst.index(), val as f32); + ctx.set_dst_f32(val as f32); Ok(()) } -fn conv_f32_convert_i32_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_f32_convert_i32_u(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_i32(ctx.src.index()); - ctx.reg_file.set_f32(ctx.dst.index(), (val as u32) as f32); + ctx.set_dst_f32((val as u32) as f32); Ok(()) } // i64 -> f32 -fn conv_f32_convert_i64_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_f32_convert_i64_s(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_i64(ctx.src.index()); - ctx.reg_file.set_f32(ctx.dst.index(), val as f32); + ctx.set_dst_f32(val as f32); Ok(()) } -fn conv_f32_convert_i64_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_f32_convert_i64_u(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_i64(ctx.src.index()); - ctx.reg_file.set_f32(ctx.dst.index(), (val as u64) as f32); + ctx.set_dst_f32((val as u64) as f32); Ok(()) } // i32 -> f64 -fn conv_f64_convert_i32_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_f64_convert_i32_s(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_i32(ctx.src.index()); - ctx.reg_file.set_f64(ctx.dst.index(), val as f64); + ctx.set_dst_f64(val as f64); Ok(()) } -fn conv_f64_convert_i32_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_f64_convert_i32_u(mut ctx: ConversionRegContext) 
-> Result<(), RuntimeError> { let val = ctx.reg_file.get_i32(ctx.src.index()); - ctx.reg_file.set_f64(ctx.dst.index(), (val as u32) as f64); + ctx.set_dst_f64((val as u32) as f64); Ok(()) } // i64 -> f64 -fn conv_f64_convert_i64_s(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_f64_convert_i64_s(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_i64(ctx.src.index()); - ctx.reg_file.set_f64(ctx.dst.index(), val as f64); + ctx.set_dst_f64(val as f64); Ok(()) } -fn conv_f64_convert_i64_u(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_f64_convert_i64_u(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_i64(ctx.src.index()); - ctx.reg_file.set_f64(ctx.dst.index(), (val as u64) as f64); + ctx.set_dst_f64((val as u64) as f64); Ok(()) } // f64 -> f32 -fn conv_f32_demote_f64(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_f32_demote_f64(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f64(ctx.src.index()); - ctx.reg_file.set_f32(ctx.dst.index(), val as f32); + ctx.set_dst_f32(val as f32); Ok(()) } // f32 -> f64 -fn conv_f64_promote_f32(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_f64_promote_f32(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f32(ctx.src.index()); - ctx.reg_file.set_f64(ctx.dst.index(), val as f64); + ctx.set_dst_f64(val as f64); Ok(()) } // Reinterpret operations -fn conv_i32_reinterpret_f32(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i32_reinterpret_f32(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f32(ctx.src.index()); - ctx.reg_file.set_i32(ctx.dst.index(), val.to_bits() as i32); + ctx.set_dst_i32(val.to_bits() as i32); Ok(()) } -fn conv_f32_reinterpret_i32(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_f32_reinterpret_i32(mut ctx: 
ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_i32(ctx.src.index()); - ctx.reg_file - .set_f32(ctx.dst.index(), f32::from_bits(val as u32)); + ctx.set_dst_f32(f32::from_bits(val as u32)); Ok(()) } -fn conv_i64_reinterpret_f64(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_i64_reinterpret_f64(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_f64(ctx.src.index()); - ctx.reg_file.set_i64(ctx.dst.index(), val.to_bits() as i64); + ctx.set_dst_i64(val.to_bits() as i64); Ok(()) } -fn conv_f64_reinterpret_i64(ctx: ConversionRegContext) -> Result<(), RuntimeError> { +fn conv_f64_reinterpret_i64(mut ctx: ConversionRegContext) -> Result<(), RuntimeError> { let val = ctx.reg_file.get_i64(ctx.src.index()); - ctx.reg_file - .set_f64(ctx.dst.index(), f64::from_bits(val as u64)); + ctx.set_dst_f64(f64::from_bits(val as u64)); Ok(()) } @@ -3301,12 +3481,56 @@ lazy_static! { // Memory Load Reg handlers struct MemoryLoadRegContext<'a> { reg_file: &'a mut RegFile, + locals: &'a mut [Val], mem_addr: &'a MemAddr, - addr: Reg, - dst: Reg, + addr: I32RegOperand, + dst: RegOrLocal, offset: u64, } +impl<'a> MemoryLoadRegContext<'a> { + #[inline] + fn get_addr(&self) -> Result { + match &self.addr { + I32RegOperand::Reg(idx) => Ok(self.reg_file.get_i32(*idx)), + I32RegOperand::Const(val) => Ok(*val), + I32RegOperand::Param(idx) => self.locals[*idx as usize].to_i32(), + } + } + + #[inline] + fn set_dst_i32(&mut self, val: i32) { + match &self.dst { + RegOrLocal::Reg(idx) => self.reg_file.set_i32(*idx, val), + RegOrLocal::Local(idx) => self.locals[*idx as usize] = Val::Num(Num::I32(val)), + } + } + + #[inline] + fn set_dst_i64(&mut self, val: i64) { + match &self.dst { + RegOrLocal::Reg(idx) => self.reg_file.set_i64(*idx, val), + RegOrLocal::Local(idx) => self.locals[*idx as usize] = Val::Num(Num::I64(val)), + } + } + + #[inline] + fn set_dst_f32(&mut self, val: f32) { + match &self.dst { + 
RegOrLocal::Reg(idx) => self.reg_file.set_f32(*idx, val), + RegOrLocal::Local(idx) => self.locals[*idx as usize] = Val::Num(Num::F32(val)), + } + } + + #[inline] + fn set_dst_f64(&mut self, val: f64) { + match &self.dst { + RegOrLocal::Reg(idx) => self.reg_file.set_f64(*idx, val), + RegOrLocal::Local(idx) => self.locals[*idx as usize] = Val::Num(Num::F64(val)), + } + } +} + type MemoryLoadRegHandler = fn(MemoryLoadRegContext) -> Result<(), RuntimeError>; fn memory_load_reg_invalid_handler(_ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { @@ -3321,115 +3545,115 @@ fn make_memarg(offset: u64) -> Memarg { } } -fn mem_load_i32(ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); +fn mem_load_i32(mut ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { + let ptr = ctx.get_addr()?; let memarg = make_memarg(ctx.offset); - let val: i32 = ctx.mem_addr.load(&memarg, ptr)?; - ctx.reg_file.set_i32(ctx.dst.index(), val); + let val: i32 = ctx.mem_addr.load(&memarg, ptr); + ctx.set_dst_i32(val); Ok(()) } -fn mem_load_i64(ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); +fn mem_load_i64(mut ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { + let ptr = ctx.get_addr()?; let memarg = make_memarg(ctx.offset); - let val: i64 = ctx.mem_addr.load(&memarg, ptr)?; - ctx.reg_file.set_i64(ctx.dst.index(), val); + let val: i64 = ctx.mem_addr.load(&memarg, ptr); + ctx.set_dst_i64(val); Ok(()) } -fn mem_load_f32(ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); +fn mem_load_f32(mut ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { + let ptr = ctx.get_addr()?; let memarg = make_memarg(ctx.offset); - let val: f32 = ctx.mem_addr.load(&memarg, ptr)?; - ctx.reg_file.set_f32(ctx.dst.index(), val); + let val: f32 = ctx.mem_addr.load(&memarg, ptr); + ctx.set_dst_f32(val); Ok(()) } -fn mem_load_f64(ctx: 
MemoryLoadRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); +fn mem_load_f64(mut ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { + let ptr = ctx.get_addr()?; let memarg = make_memarg(ctx.offset); - let val: f64 = ctx.mem_addr.load(&memarg, ptr)?; - ctx.reg_file.set_f64(ctx.dst.index(), val); + let val: f64 = ctx.mem_addr.load(&memarg, ptr); + ctx.set_dst_f64(val); Ok(()) } -fn mem_load_i32_8s(ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); +fn mem_load_i32_8s(mut ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { + let ptr = ctx.get_addr()?; let memarg = make_memarg(ctx.offset); - let val: i8 = ctx.mem_addr.load(&memarg, ptr)?; - ctx.reg_file.set_i32(ctx.dst.index(), val as i32); + let val: i8 = ctx.mem_addr.load(&memarg, ptr); + ctx.set_dst_i32(val as i32); Ok(()) } -fn mem_load_i32_8u(ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); +fn mem_load_i32_8u(mut ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { + let ptr = ctx.get_addr()?; let memarg = make_memarg(ctx.offset); - let val: u8 = ctx.mem_addr.load(&memarg, ptr)?; - ctx.reg_file.set_i32(ctx.dst.index(), val as i32); + let val: u8 = ctx.mem_addr.load(&memarg, ptr); + ctx.set_dst_i32(val as i32); Ok(()) } -fn mem_load_i32_16s(ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); +fn mem_load_i32_16s(mut ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { + let ptr = ctx.get_addr()?; let memarg = make_memarg(ctx.offset); - let val: i16 = ctx.mem_addr.load(&memarg, ptr)?; - ctx.reg_file.set_i32(ctx.dst.index(), val as i32); + let val: i16 = ctx.mem_addr.load(&memarg, ptr); + ctx.set_dst_i32(val as i32); Ok(()) } -fn mem_load_i32_16u(ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); +fn mem_load_i32_16u(mut ctx: 
MemoryLoadRegContext) -> Result<(), RuntimeError> { + let ptr = ctx.get_addr()?; let memarg = make_memarg(ctx.offset); - let val: u16 = ctx.mem_addr.load(&memarg, ptr)?; - ctx.reg_file.set_i32(ctx.dst.index(), val as i32); + let val: u16 = ctx.mem_addr.load(&memarg, ptr); + ctx.set_dst_i32(val as i32); Ok(()) } -fn mem_load_i64_8s(ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); +fn mem_load_i64_8s(mut ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { + let ptr = ctx.get_addr()?; let memarg = make_memarg(ctx.offset); - let val: i8 = ctx.mem_addr.load(&memarg, ptr)?; - ctx.reg_file.set_i64(ctx.dst.index(), val as i64); + let val: i8 = ctx.mem_addr.load(&memarg, ptr); + ctx.set_dst_i64(val as i64); Ok(()) } -fn mem_load_i64_8u(ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); +fn mem_load_i64_8u(mut ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { + let ptr = ctx.get_addr()?; let memarg = make_memarg(ctx.offset); - let val: u8 = ctx.mem_addr.load(&memarg, ptr)?; - ctx.reg_file.set_i64(ctx.dst.index(), val as i64); + let val: u8 = ctx.mem_addr.load(&memarg, ptr); + ctx.set_dst_i64(val as i64); Ok(()) } -fn mem_load_i64_16s(ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); +fn mem_load_i64_16s(mut ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { + let ptr = ctx.get_addr()?; let memarg = make_memarg(ctx.offset); - let val: i16 = ctx.mem_addr.load(&memarg, ptr)?; - ctx.reg_file.set_i64(ctx.dst.index(), val as i64); + let val: i16 = ctx.mem_addr.load(&memarg, ptr); + ctx.set_dst_i64(val as i64); Ok(()) } -fn mem_load_i64_16u(ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); +fn mem_load_i64_16u(mut ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { + let ptr = ctx.get_addr()?; let memarg = 
make_memarg(ctx.offset); - let val: u16 = ctx.mem_addr.load(&memarg, ptr)?; - ctx.reg_file.set_i64(ctx.dst.index(), val as i64); + let val: u16 = ctx.mem_addr.load(&memarg, ptr); + ctx.set_dst_i64(val as i64); Ok(()) } -fn mem_load_i64_32s(ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); +fn mem_load_i64_32s(mut ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { + let ptr = ctx.get_addr()?; let memarg = make_memarg(ctx.offset); - let val: i32 = ctx.mem_addr.load(&memarg, ptr)?; - ctx.reg_file.set_i64(ctx.dst.index(), val as i64); + let val: i32 = ctx.mem_addr.load(&memarg, ptr); + ctx.set_dst_i64(val as i64); Ok(()) } -fn mem_load_i64_32u(ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); +fn mem_load_i64_32u(mut ctx: MemoryLoadRegContext) -> Result<(), RuntimeError> { + let ptr = ctx.get_addr()?; let memarg = make_memarg(ctx.offset); - let val: u32 = ctx.mem_addr.load(&memarg, ptr)?; - ctx.reg_file.set_i64(ctx.dst.index(), val as i64); + let val: u32 = ctx.mem_addr.load(&memarg, ptr); + ctx.set_dst_i64(val as i64); Ok(()) } @@ -3459,12 +3683,24 @@ lazy_static! 
{ // Memory Store Reg handlers struct MemoryStoreRegContext<'a> { reg_file: &'a RegFile, + locals: &'a [Val], mem_addr: &'a MemAddr, - addr: Reg, + addr: I32RegOperand, value: Reg, offset: u64, } +impl<'a> MemoryStoreRegContext<'a> { + #[inline] + fn get_addr(&self) -> Result { + match &self.addr { + I32RegOperand::Reg(idx) => Ok(self.reg_file.get_i32(*idx)), + I32RegOperand::Const(val) => Ok(*val), + I32RegOperand::Param(idx) => self.locals[*idx as usize].to_i32(), + } + } +} + type MemoryStoreRegHandler = fn(MemoryStoreRegContext) -> Result<(), RuntimeError>; fn memory_store_reg_invalid_handler(_ctx: MemoryStoreRegContext) -> Result<(), RuntimeError> { @@ -3472,74 +3708,74 @@ fn memory_store_reg_invalid_handler(_ctx: MemoryStoreRegContext) -> Result<(), R } fn mem_store_i32(ctx: MemoryStoreRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); + let ptr = ctx.get_addr()?; let val = ctx.reg_file.get_i32(ctx.value.index()); let memarg = make_memarg(ctx.offset); - ctx.mem_addr.store(&memarg, ptr, val)?; + ctx.mem_addr.store(&memarg, ptr, val); Ok(()) } fn mem_store_i64(ctx: MemoryStoreRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); + let ptr = ctx.get_addr()?; let val = ctx.reg_file.get_i64(ctx.value.index()); let memarg = make_memarg(ctx.offset); - ctx.mem_addr.store(&memarg, ptr, val)?; + ctx.mem_addr.store(&memarg, ptr, val); Ok(()) } fn mem_store_f32(ctx: MemoryStoreRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); + let ptr = ctx.get_addr()?; let val = ctx.reg_file.get_f32(ctx.value.index()); let memarg = make_memarg(ctx.offset); - ctx.mem_addr.store(&memarg, ptr, val)?; + ctx.mem_addr.store(&memarg, ptr, val); Ok(()) } fn mem_store_f64(ctx: MemoryStoreRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); + let ptr = ctx.get_addr()?; let val = ctx.reg_file.get_f64(ctx.value.index()); let memarg 
= make_memarg(ctx.offset); - ctx.mem_addr.store(&memarg, ptr, val)?; + ctx.mem_addr.store(&memarg, ptr, val); Ok(()) } fn mem_store_i32_8(ctx: MemoryStoreRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); + let ptr = ctx.get_addr()?; let val = ctx.reg_file.get_i32(ctx.value.index()) as u8; let memarg = make_memarg(ctx.offset); - ctx.mem_addr.store(&memarg, ptr, val)?; + ctx.mem_addr.store(&memarg, ptr, val); Ok(()) } fn mem_store_i32_16(ctx: MemoryStoreRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); + let ptr = ctx.get_addr()?; let val = ctx.reg_file.get_i32(ctx.value.index()) as u16; let memarg = make_memarg(ctx.offset); - ctx.mem_addr.store(&memarg, ptr, val)?; + ctx.mem_addr.store(&memarg, ptr, val); Ok(()) } fn mem_store_i64_8(ctx: MemoryStoreRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); + let ptr = ctx.get_addr()?; let val = ctx.reg_file.get_i64(ctx.value.index()) as u8; let memarg = make_memarg(ctx.offset); - ctx.mem_addr.store(&memarg, ptr, val)?; + ctx.mem_addr.store(&memarg, ptr, val); Ok(()) } fn mem_store_i64_16(ctx: MemoryStoreRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); + let ptr = ctx.get_addr()?; let val = ctx.reg_file.get_i64(ctx.value.index()) as u16; let memarg = make_memarg(ctx.offset); - ctx.mem_addr.store(&memarg, ptr, val)?; + ctx.mem_addr.store(&memarg, ptr, val); Ok(()) } fn mem_store_i64_32(ctx: MemoryStoreRegContext) -> Result<(), RuntimeError> { - let ptr = ctx.reg_file.get_i32(ctx.addr.index()); + let ptr = ctx.get_addr()?; let val = ctx.reg_file.get_i64(ctx.value.index()) as u32; let memarg = make_memarg(ctx.offset); - ctx.mem_addr.store(&memarg, ptr, val)?; + ctx.mem_addr.store(&memarg, ptr, val); Ok(()) } @@ -3567,7 +3803,7 @@ struct MemoryOpsRegContext<'a> { mem_addr: &'a MemAddr, module_inst: &'a ModuleInst, dst: Option, - args: Vec, + args: &'a 
[Reg], data_index: u32, } @@ -3605,7 +3841,7 @@ fn mem_ops_copy(ctx: MemoryOpsRegContext) -> Result<(), RuntimeError> { let dest = ctx.reg_file.get_i32(ctx.args[0].index()); let src = ctx.reg_file.get_i32(ctx.args[1].index()); let len = ctx.reg_file.get_i32(ctx.args[2].index()); - ctx.mem_addr.memory_copy(dest, src, len)?; + ctx.mem_addr.memory_copy(dest, src, len); Ok(()) } @@ -3622,8 +3858,7 @@ fn mem_ops_init(ctx: MemoryOpsRegContext) -> Result<(), RuntimeError> { let data_bytes = data_addr.get_data(); if len > 0 { - let init_data = data_bytes[offset..offset + len].to_vec(); - ctx.mem_addr.init(dest, &init_data); + ctx.mem_addr.init(dest, &data_bytes[offset..offset + len]); } Ok(()) } @@ -3632,7 +3867,7 @@ fn mem_ops_fill(ctx: MemoryOpsRegContext) -> Result<(), RuntimeError> { let dest = ctx.reg_file.get_i32(ctx.args[0].index()); let val = ctx.reg_file.get_i32(ctx.args[1].index()) as u8; let size = ctx.reg_file.get_i32(ctx.args[2].index()); - ctx.mem_addr.memory_fill(dest, val, size)?; + ctx.mem_addr.memory_fill(dest, val, size); Ok(()) } @@ -3659,53 +3894,75 @@ struct SelectRegContext<'a> { cond: Reg, } +impl<'a> SelectRegContext<'a> { + #[inline] + fn set_dst_i32(&mut self, val: i32) { + self.reg_file.set_i32(self.dst.index(), val); + } + + #[inline] + fn set_dst_i64(&mut self, val: i64) { + self.reg_file.set_i64(self.dst.index(), val); + } + + #[inline] + fn set_dst_f32(&mut self, val: f32) { + self.reg_file.set_f32(self.dst.index(), val); + } + + #[inline] + fn set_dst_f64(&mut self, val: f64) { + self.reg_file.set_f64(self.dst.index(), val); + } +} + type SelectRegHandler = fn(SelectRegContext) -> Result<(), RuntimeError>; fn select_reg_invalid_handler(_ctx: SelectRegContext) -> Result<(), RuntimeError> { Err(RuntimeError::InvalidHandlerIndex) } -fn select_i32(ctx: SelectRegContext) -> Result<(), RuntimeError> { +fn select_i32(mut ctx: SelectRegContext) -> Result<(), RuntimeError> { let cond = ctx.reg_file.get_i32(ctx.cond.index()); let result = if cond != 
0 { ctx.reg_file.get_i32(ctx.val1.index()) } else { ctx.reg_file.get_i32(ctx.val2.index()) }; - ctx.reg_file.set_i32(ctx.dst.index(), result); + ctx.set_dst_i32(result); Ok(()) } -fn select_i64(ctx: SelectRegContext) -> Result<(), RuntimeError> { +fn select_i64(mut ctx: SelectRegContext) -> Result<(), RuntimeError> { let cond = ctx.reg_file.get_i32(ctx.cond.index()); let result = if cond != 0 { ctx.reg_file.get_i64(ctx.val1.index()) } else { ctx.reg_file.get_i64(ctx.val2.index()) }; - ctx.reg_file.set_i64(ctx.dst.index(), result); + ctx.set_dst_i64(result); Ok(()) } -fn select_f32(ctx: SelectRegContext) -> Result<(), RuntimeError> { +fn select_f32(mut ctx: SelectRegContext) -> Result<(), RuntimeError> { let cond = ctx.reg_file.get_i32(ctx.cond.index()); let result = if cond != 0 { ctx.reg_file.get_f32(ctx.val1.index()) } else { ctx.reg_file.get_f32(ctx.val2.index()) }; - ctx.reg_file.set_f32(ctx.dst.index(), result); + ctx.set_dst_f32(result); Ok(()) } -fn select_f64(ctx: SelectRegContext) -> Result<(), RuntimeError> { +fn select_f64(mut ctx: SelectRegContext) -> Result<(), RuntimeError> { let cond = ctx.reg_file.get_i32(ctx.cond.index()); let result = if cond != 0 { ctx.reg_file.get_f64(ctx.val1.index()) } else { ctx.reg_file.get_f64(ctx.val2.index()) }; - ctx.reg_file.set_f64(ctx.dst.index(), result); + ctx.set_dst_f64(result); Ok(()) } diff --git a/src/parser.rs b/src/parser.rs index 9a3f356..05761c6 100644 --- a/src/parser.rs +++ b/src/parser.rs @@ -53,6 +53,535 @@ use rustc_hash::FxHashMap; use std::rc::Rc; use std::sync::LazyLock; +/// Pending operand for peek-based operand folding. +/// When a const or local.get instruction is followed by a foldable consumer, +/// the operand is stored here and the source instruction is skipped. 
+#[derive(Clone, Copy, Debug)] +enum PendingOperand { + I32Const(i32), + I64Const(i64), + F32Const(f32), + F64Const(f64), + I32Local(u16), + I64Local(u16), + F32Local(u16), + F64Local(u16), +} + +/// Extract I32RegOperand from pending_operands stack, falling back to register +#[inline] +fn take_i32_operand(pending: &mut Vec, reg_index: u16) -> I32RegOperand { + if let Some(op) = pending.pop() { + match op { + PendingOperand::I32Const(v) => I32RegOperand::Const(v), + PendingOperand::I32Local(idx) => I32RegOperand::Param(idx), + _ => { + pending.push(op); + I32RegOperand::Reg(reg_index) + } + } + } else { + I32RegOperand::Reg(reg_index) + } +} + +/// Extract I64RegOperand from pending_operands stack, falling back to register +#[inline] +fn take_i64_operand(pending: &mut Vec, reg_index: u16) -> I64RegOperand { + if let Some(op) = pending.pop() { + match op { + PendingOperand::I64Const(v) => I64RegOperand::Const(v), + PendingOperand::I64Local(idx) => I64RegOperand::Param(idx), + _ => { + pending.push(op); + I64RegOperand::Reg(reg_index) + } + } + } else { + I64RegOperand::Reg(reg_index) + } +} + +/// Extract F32RegOperand from pending_operands stack, falling back to register +#[inline] +fn take_f32_operand(pending: &mut Vec, reg_index: u16) -> F32RegOperand { + if let Some(op) = pending.pop() { + match op { + PendingOperand::F32Const(v) => F32RegOperand::Const(v), + PendingOperand::F32Local(idx) => F32RegOperand::Param(idx), + _ => { + pending.push(op); + F32RegOperand::Reg(reg_index) + } + } + } else { + F32RegOperand::Reg(reg_index) + } +} + +/// Extract F64RegOperand from pending_operands stack, falling back to register +#[inline] +fn take_f64_operand(pending: &mut Vec, reg_index: u16) -> F64RegOperand { + if let Some(op) = pending.pop() { + match op { + PendingOperand::F64Const(v) => F64RegOperand::Const(v), + PendingOperand::F64Local(idx) => F64RegOperand::Param(idx), + _ => { + pending.push(op); + F64RegOperand::Reg(reg_index) + } + } + } else { + 
F64RegOperand::Reg(reg_index) + } +} + +/// Check if the next instruction(s) can fold an I32 operand. +#[inline] +fn can_fold_i32<'a>( + ops: &mut itertools::MultiPeek< + impl Iterator, usize), wasmparser::BinaryReaderError>>, + >, +) -> bool { + let can_fold = if let Some(Ok((next_op, _))) = ops.peek() { + if is_i32_foldable_consumer(next_op) { + true + } else if matches!( + next_op, + wasmparser::Operator::I32Const { .. } | wasmparser::Operator::LocalGet { .. } + ) { + if let Some(Ok((next_next_op, _))) = ops.peek() { + is_i32_foldable_consumer(next_next_op) + } else { + false + } + } else { + false + } + } else { + false + }; + ops.reset_peek(); + can_fold +} + +/// Check if the next instruction(s) can fold an I64 operand. +#[inline] +fn can_fold_i64<'a>( + ops: &mut itertools::MultiPeek< + impl Iterator, usize), wasmparser::BinaryReaderError>>, + >, +) -> bool { + let can_fold = if let Some(Ok((next_op, _))) = ops.peek() { + if is_i64_foldable_consumer(next_op) { + true + } else if matches!( + next_op, + wasmparser::Operator::I64Const { .. } | wasmparser::Operator::LocalGet { .. } + ) { + if let Some(Ok((next_next_op, _))) = ops.peek() { + is_i64_foldable_consumer(next_next_op) + } else { + false + } + } else { + false + } + } else { + false + }; + ops.reset_peek(); + can_fold +} + +/// Check if the next instruction(s) can fold an F32 operand. +#[inline] +fn can_fold_f32<'a>( + ops: &mut itertools::MultiPeek< + impl Iterator, usize), wasmparser::BinaryReaderError>>, + >, +) -> bool { + let can_fold = if let Some(Ok((next_op, _))) = ops.peek() { + if is_f32_foldable_consumer(next_op) { + true + } else if matches!( + next_op, + wasmparser::Operator::F32Const { .. } | wasmparser::Operator::LocalGet { .. 
+            }
+        ) {
+            if let Some(Ok((next_next_op, _))) = ops.peek() {
+                is_f32_foldable_consumer(next_next_op)
+            } else {
+                false
+            }
+        } else {
+            false
+        }
+    } else {
+        false
+    };
+    ops.reset_peek();
+    can_fold
+}
+
+/// Check if the next instruction(s) can fold an F64 operand.
+#[inline]
+fn can_fold_f64<'a>(
+    ops: &mut itertools::MultiPeek<
+        impl Iterator<Item = Result<(wasmparser::Operator<'a>, usize), wasmparser::BinaryReaderError>>,
+    >,
+) -> bool {
+    let can_fold = if let Some(Ok((next_op, _))) = ops.peek() {
+        if is_f64_foldable_consumer(next_op) {
+            true
+        } else if matches!(
+            next_op,
+            wasmparser::Operator::F64Const { .. } | wasmparser::Operator::LocalGet { .. }
+        ) {
+            if let Some(Ok((next_next_op, _))) = ops.peek() {
+                is_f64_foldable_consumer(next_next_op)
+            } else {
+                false
+            }
+        } else {
+            false
+        }
+    } else {
+        false
+    };
+    ops.reset_peek();
+    can_fold
+}
+
+/// Check if the instruction can consume an I32 operand (excluding br_if/if/loads/stores)
+/// Note: Load/Store instructions are excluded because the 2-ahead lookahead in can_fold_i32
+/// doesn't verify the type of intervening LocalGet, which can cause incorrect folding.
+#[inline] +fn is_i32_foldable_consumer(op: &wasmparser::Operator) -> bool { + matches!( + op, + wasmparser::Operator::I32Add + | wasmparser::Operator::I32Sub + | wasmparser::Operator::I32Mul + | wasmparser::Operator::I32DivS + | wasmparser::Operator::I32DivU + | wasmparser::Operator::I32RemS + | wasmparser::Operator::I32RemU + | wasmparser::Operator::I32And + | wasmparser::Operator::I32Or + | wasmparser::Operator::I32Xor + | wasmparser::Operator::I32Shl + | wasmparser::Operator::I32ShrS + | wasmparser::Operator::I32ShrU + | wasmparser::Operator::I32Rotl + | wasmparser::Operator::I32Rotr + | wasmparser::Operator::I32Eq + | wasmparser::Operator::I32Ne + | wasmparser::Operator::I32LtS + | wasmparser::Operator::I32LtU + | wasmparser::Operator::I32LeS + | wasmparser::Operator::I32LeU + | wasmparser::Operator::I32GtS + | wasmparser::Operator::I32GtU + | wasmparser::Operator::I32GeS + | wasmparser::Operator::I32GeU + | wasmparser::Operator::I32Clz + | wasmparser::Operator::I32Ctz + | wasmparser::Operator::I32Popcnt + | wasmparser::Operator::I32Eqz + ) +} + +/// Check if the instruction is a memory load (addr is I32, single operand) +#[inline] +fn is_memory_load(op: &wasmparser::Operator) -> bool { + matches!( + op, + wasmparser::Operator::I32Load { .. } + | wasmparser::Operator::I64Load { .. } + | wasmparser::Operator::F32Load { .. } + | wasmparser::Operator::F64Load { .. } + | wasmparser::Operator::I32Load8S { .. } + | wasmparser::Operator::I32Load8U { .. } + | wasmparser::Operator::I32Load16S { .. } + | wasmparser::Operator::I32Load16U { .. } + | wasmparser::Operator::I64Load8S { .. } + | wasmparser::Operator::I64Load8U { .. } + | wasmparser::Operator::I64Load16S { .. } + | wasmparser::Operator::I64Load16U { .. } + | wasmparser::Operator::I64Load32S { .. } + | wasmparser::Operator::I64Load32U { .. 
+            }
+    )
+}
+
+/// Check if the next instruction is a memory load (1-ahead only, for I32 addr folding)
+#[inline]
+fn can_fold_for_load<'a>(
+    ops: &mut itertools::MultiPeek<
+        impl Iterator<Item = Result<(wasmparser::Operator<'a>, usize), wasmparser::BinaryReaderError>>,
+    >,
+) -> bool {
+    let can_fold = if let Some(Ok((next_op, _))) = ops.peek() {
+        is_memory_load(next_op)
+    } else {
+        false
+    };
+    ops.reset_peek();
+    can_fold
+}
+
+/// Check if the instruction is a memory store (for 2-ahead matching)
+#[inline]
+fn is_memory_store(op: &wasmparser::Operator) -> bool {
+    matches!(
+        op,
+        wasmparser::Operator::I32Store { .. }
+            | wasmparser::Operator::I64Store { .. }
+            | wasmparser::Operator::F32Store { .. }
+            | wasmparser::Operator::F64Store { .. }
+            | wasmparser::Operator::I32Store8 { .. }
+            | wasmparser::Operator::I32Store16 { .. }
+            | wasmparser::Operator::I64Store8 { .. }
+            | wasmparser::Operator::I64Store16 { .. }
+            | wasmparser::Operator::I64Store32 { .. }
+    )
+}
+
+/// Check if 2-ahead pattern matches [value_producer, store] for addr folding
+/// Pattern: addr (current) -> value -> store
+#[inline]
+fn can_fold_for_store<'a>(
+    ops: &mut itertools::MultiPeek<
+        impl Iterator<Item = Result<(wasmparser::Operator<'a>, usize), wasmparser::BinaryReaderError>>,
+    >,
+) -> bool {
+    let can_fold = if let Some(Ok((value_op, _))) = ops.peek() {
+        // Check if value_op could produce a value for store
+        let is_value_candidate = matches!(
+            value_op,
+            wasmparser::Operator::I32Const { .. }
+                | wasmparser::Operator::I64Const { .. }
+                | wasmparser::Operator::F32Const { .. }
+                | wasmparser::Operator::F64Const { .. }
+                | wasmparser::Operator::LocalGet { ..
+        }
+        );
+        if is_value_candidate {
+            if let Some(Ok((store_op, _))) = ops.peek() {
+                is_memory_store(store_op)
+            } else {
+                false
+            }
+        } else {
+            false
+        }
+    } else {
+        false
+    };
+    ops.reset_peek();
+    can_fold
+}
+
+/// Check if the next instruction is local.set with I32 type (for dst folding)
+/// Returns Some(local_idx) if folding is possible, None otherwise
+#[inline]
+fn try_fold_dst_i32<'a>(
+    ops: &mut itertools::MultiPeek<
+        impl Iterator<Item = Result<(wasmparser::Operator<'a>, usize), wasmparser::BinaryReaderError>>,
+    >,
+    param_types: &[ValueType],
+    locals: &[(u32, ValueType)],
+) -> Option<u16> {
+    let result = if let Some(Ok((next_op, _))) = ops.peek() {
+        if let wasmparser::Operator::LocalSet { local_index } = next_op {
+            let local_type = get_local_type(param_types, locals, *local_index);
+            if matches!(local_type, ValueType::NumType(NumType::I32)) {
+                Some(*local_index as u16)
+            } else {
+                None
+            }
+        } else {
+            None
+        }
+    } else {
+        None
+    };
+    ops.reset_peek();
+    result
+}
+
+/// Check if the next instruction is local.set with I64 type (for dst folding)
+#[inline]
+fn try_fold_dst_i64<'a>(
+    ops: &mut itertools::MultiPeek<
+        impl Iterator<Item = Result<(wasmparser::Operator<'a>, usize), wasmparser::BinaryReaderError>>,
+    >,
+    param_types: &[ValueType],
+    locals: &[(u32, ValueType)],
+) -> Option<u16> {
+    let result = if let Some(Ok((next_op, _))) = ops.peek() {
+        if let wasmparser::Operator::LocalSet { local_index } = next_op {
+            let local_type = get_local_type(param_types, locals, *local_index);
+            if matches!(local_type, ValueType::NumType(NumType::I64)) {
+                Some(*local_index as u16)
+            } else {
+                None
+            }
+        } else {
+            None
+        }
+    } else {
+        None
+    };
+    ops.reset_peek();
+    result
+}
+
+/// Check if the next instruction is local.set with F32 type (for dst folding)
+#[inline]
+fn try_fold_dst_f32<'a>(
+    ops: &mut itertools::MultiPeek<
+        impl Iterator<Item = Result<(wasmparser::Operator<'a>, usize), wasmparser::BinaryReaderError>>,
+    >,
+    param_types: &[ValueType],
+    locals: &[(u32, ValueType)],
+) -> Option<u16> {
+    let result = if let Some(Ok((next_op, _))) = ops.peek() {
+        if let
+        wasmparser::Operator::LocalSet { local_index } = next_op {
+            let local_type = get_local_type(param_types, locals, *local_index);
+            if matches!(local_type, ValueType::NumType(NumType::F32)) {
+                Some(*local_index as u16)
+            } else {
+                None
+            }
+        } else {
+            None
+        }
+    } else {
+        None
+    };
+    ops.reset_peek();
+    result
+}
+
+/// Check if the next instruction is local.set with F64 type (for dst folding)
+#[inline]
+fn try_fold_dst_f64<'a>(
+    ops: &mut itertools::MultiPeek<
+        impl Iterator<Item = Result<(wasmparser::Operator<'a>, usize), wasmparser::BinaryReaderError>>,
+    >,
+    param_types: &[ValueType],
+    locals: &[(u32, ValueType)],
+) -> Option<u16> {
+    let result = if let Some(Ok((next_op, _))) = ops.peek() {
+        if let wasmparser::Operator::LocalSet { local_index } = next_op {
+            let local_type = get_local_type(param_types, locals, *local_index);
+            if matches!(local_type, ValueType::NumType(NumType::F64)) {
+                Some(*local_index as u16)
+            } else {
+                None
+            }
+        } else {
+            None
+        }
+    } else {
+        None
+    };
+    ops.reset_peek();
+    result
+}
+
+/// Check if the instruction can consume an I64 operand
+#[inline]
+fn is_i64_foldable_consumer(op: &wasmparser::Operator) -> bool {
+    matches!(
+        op,
+        wasmparser::Operator::I64Add
+            | wasmparser::Operator::I64Sub
+            | wasmparser::Operator::I64Mul
+            | wasmparser::Operator::I64DivS
+            | wasmparser::Operator::I64DivU
+            | wasmparser::Operator::I64RemS
+            | wasmparser::Operator::I64RemU
+            | wasmparser::Operator::I64And
+            | wasmparser::Operator::I64Or
+            | wasmparser::Operator::I64Xor
+            | wasmparser::Operator::I64Shl
+            | wasmparser::Operator::I64ShrS
+            | wasmparser::Operator::I64ShrU
+            | wasmparser::Operator::I64Rotl
+            | wasmparser::Operator::I64Rotr
+            | wasmparser::Operator::I64Eq
+            | wasmparser::Operator::I64Ne
+            | wasmparser::Operator::I64LtS
+            | wasmparser::Operator::I64LtU
+            | wasmparser::Operator::I64LeS
+            | wasmparser::Operator::I64LeU
+            | wasmparser::Operator::I64GtS
+            | wasmparser::Operator::I64GtU
+            | wasmparser::Operator::I64GeS
+            | wasmparser::Operator::I64GeU
+            |
wasmparser::Operator::I64Clz + | wasmparser::Operator::I64Ctz + | wasmparser::Operator::I64Popcnt + | wasmparser::Operator::I64Eqz + ) +} + +/// Check if the instruction can consume an F32 operand +#[inline] +fn is_f32_foldable_consumer(op: &wasmparser::Operator) -> bool { + matches!( + op, + wasmparser::Operator::F32Add + | wasmparser::Operator::F32Sub + | wasmparser::Operator::F32Mul + | wasmparser::Operator::F32Div + | wasmparser::Operator::F32Min + | wasmparser::Operator::F32Max + | wasmparser::Operator::F32Copysign + | wasmparser::Operator::F32Eq + | wasmparser::Operator::F32Ne + | wasmparser::Operator::F32Lt + | wasmparser::Operator::F32Le + | wasmparser::Operator::F32Gt + | wasmparser::Operator::F32Ge + | wasmparser::Operator::F32Abs + | wasmparser::Operator::F32Neg + | wasmparser::Operator::F32Ceil + | wasmparser::Operator::F32Floor + | wasmparser::Operator::F32Trunc + | wasmparser::Operator::F32Nearest + | wasmparser::Operator::F32Sqrt + ) +} + +/// Check if the instruction can consume an F64 operand +#[inline] +fn is_f64_foldable_consumer(op: &wasmparser::Operator) -> bool { + matches!( + op, + wasmparser::Operator::F64Add + | wasmparser::Operator::F64Sub + | wasmparser::Operator::F64Mul + | wasmparser::Operator::F64Div + | wasmparser::Operator::F64Min + | wasmparser::Operator::F64Max + | wasmparser::Operator::F64Copysign + | wasmparser::Operator::F64Eq + | wasmparser::Operator::F64Ne + | wasmparser::Operator::F64Lt + | wasmparser::Operator::F64Le + | wasmparser::Operator::F64Gt + | wasmparser::Operator::F64Ge + | wasmparser::Operator::F64Abs + | wasmparser::Operator::F64Neg + | wasmparser::Operator::F64Ceil + | wasmparser::Operator::F64Floor + | wasmparser::Operator::F64Trunc + | wasmparser::Operator::F64Nearest + | wasmparser::Operator::F64Sqrt + ) +} + /// Control block information tracked during instruction decoding. 
/// /// Each control structure (block, loop, if) pushes an entry onto the control @@ -978,7 +1507,7 @@ fn preprocess_instructions( let default_result_regs = default_info.2; // Compute target_ip for each target (keeping existing result_regs) - let mut resolved_reg_targets: Vec<(u32, usize, Vec)> = Vec::new(); + let mut resolved_reg_targets: Vec<(u32, usize, RegSlice)> = Vec::new(); for (rel_depth, _, result_regs) in targets_clone.iter() { let depth = *rel_depth as usize; if current_control_stack_pass3.len() <= depth { @@ -1185,7 +1714,7 @@ fn preprocess_instructions( reg_targets[i] = ( *original_wasm_depth as u32, *target_ip, - target_result_regs.clone(), + target_result_regs.clone().into_boxed_slice(), ); } } @@ -1201,7 +1730,7 @@ fn preprocess_instructions( *reg_default = ( *original_wasm_depth as u32, *target_ip, - target_result_regs.clone(), + target_result_regs.clone().into_boxed_slice(), ); } } @@ -1244,7 +1773,7 @@ fn get_local_type( // First, check if the index is within the parameters range if index < params.len() { - return params[index].clone(); + return params[index]; } // Subtract parameter count to get index into locals @@ -1253,7 +1782,7 @@ fn get_local_type( // Now search through declared locals for (count, vtype) in locals { if index < *count as usize { - return vtype.clone(); + return *vtype; } index -= *count as usize; } @@ -1269,7 +1798,7 @@ fn get_global_type(module: &Module, global_index: u32) -> ValueType { for import in &module.imports { if let ImportDesc::Global(global_type) = &import.desc { if imported_global_count == global_index { - return global_type.1.clone(); + return global_type.1; } imported_global_count += 1; } @@ -1277,7 +1806,7 @@ fn get_global_type(module: &Module, global_index: u32) -> ValueType { let local_global_index = (global_index - imported_global_count) as usize; if local_global_index < module.globals.len() { - return module.globals[local_global_index].type_.1.clone(); + return module.globals[local_global_index].type_.1; 
} ValueType::NumType(NumType::I32) @@ -1292,7 +1821,7 @@ fn get_table_element_type(module: &Module, table_index: u32) -> ValueType { for import in &module.imports { if let ImportDesc::Table(table_type) = &import.desc { if imported_table_count == table_index { - return ValueType::RefType(table_type.1.clone()); + return ValueType::RefType(table_type.1); } imported_table_count += 1; } @@ -1301,7 +1830,7 @@ fn get_table_element_type(module: &Module, table_index: u32) -> ValueType { // Check module-defined tables let local_table_index = (table_index - imported_table_count) as usize; if local_table_index < module.tables.len() { - return ValueType::RefType(module.tables[local_table_index].type_.1.clone()); + return ValueType::RefType(module.tables[local_table_index].type_.1); } // Default to funcref @@ -1371,6 +1900,9 @@ fn decode_processed_instrs_and_fixups<'a>( // Track unreachable code depth (after br, return, unreachable, br_table) let mut unreachable_depth: usize = 0; + // Pending operands for folding (stack for multiple operands) + let mut pending_operands: Vec = Vec::new(); + loop { if ops.peek().is_none() { break; @@ -1416,63 +1948,94 @@ fn decode_processed_instrs_and_fixups<'a>( let local_type = get_local_type(param_types, locals, *local_index); match local_type { ValueType::NumType(NumType::I32) => { - let dst = allocator.push(local_type); - ( - ProcessedInstr::I32Reg { - handler_index: HANDLER_IDX_LOCAL_GET, - dst: dst.index(), - src1: I32RegOperand::Param(*local_index as u16), - src2: None, - }, - None, - ) + if can_fold_i32(&mut ops) + || can_fold_for_load(&mut ops) + || can_fold_for_store(&mut ops) + { + pending_operands + .push(PendingOperand::I32Local(*local_index as u16)); + allocator.push(local_type); + (None, None) + } else { + let dst = allocator.push(local_type); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_LOCAL_GET, + dst: I32RegOperand::Reg(dst.index()), + src1: I32RegOperand::Param(*local_index as u16), + src2: None, + }), + 
None, + ) + } } ValueType::NumType(NumType::I64) => { - let dst = allocator.push(local_type); - ( - ProcessedInstr::I64Reg { - handler_index: HANDLER_IDX_LOCAL_GET, - dst, - src1: I64RegOperand::Param(*local_index as u16), - src2: None, - }, - None, - ) + if can_fold_i64(&mut ops) { + pending_operands + .push(PendingOperand::I64Local(*local_index as u16)); + allocator.push(local_type); + (None, None) + } else { + let dst = allocator.push(local_type); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_LOCAL_GET, + dst: I64RegOperand::Reg(dst.index()), + src1: I64RegOperand::Param(*local_index as u16), + src2: None, + }), + None, + ) + } } ValueType::NumType(NumType::F32) => { - let dst = allocator.push(local_type); - ( - ProcessedInstr::F32Reg { - handler_index: HANDLER_IDX_LOCAL_GET, - dst, - src1: F32RegOperand::Param(*local_index as u16), - src2: None, - }, - None, - ) + if can_fold_f32(&mut ops) { + pending_operands + .push(PendingOperand::F32Local(*local_index as u16)); + allocator.push(local_type); + (None, None) + } else { + let dst = allocator.push(local_type); + ( + Some(ProcessedInstr::F32Reg { + handler_index: HANDLER_IDX_LOCAL_GET, + dst: F32RegOperand::Reg(dst.index()), + src1: F32RegOperand::Param(*local_index as u16), + src2: None, + }), + None, + ) + } } ValueType::NumType(NumType::F64) => { - let dst = allocator.push(local_type); - ( - ProcessedInstr::F64Reg { - handler_index: HANDLER_IDX_LOCAL_GET, - dst, - src1: F64RegOperand::Param(*local_index as u16), - src2: None, - }, - None, - ) + if can_fold_f64(&mut ops) { + pending_operands + .push(PendingOperand::F64Local(*local_index as u16)); + allocator.push(local_type); + (None, None) + } else { + let dst = allocator.push(local_type); + ( + Some(ProcessedInstr::F64Reg { + handler_index: HANDLER_IDX_LOCAL_GET, + dst: F64RegOperand::Reg(dst.index()), + src1: F64RegOperand::Param(*local_index as u16), + src2: None, + }), + None, + ) + } } ValueType::RefType(_) => { - // For RefType, use 
RefLocalReg + // For RefType, use RefLocalReg (no folding for ref types) let dst = allocator.push(local_type); ( - ProcessedInstr::RefLocalReg { + Some(ProcessedInstr::RefLocalReg { handler_index: HANDLER_IDX_REF_LOCAL_GET_REG, dst: dst.index(), src: 0, // unused for get local_idx: *local_index as u16, - }, + }), None, ) } @@ -1487,45 +2050,45 @@ fn decode_processed_instrs_and_fixups<'a>( let src = allocator.pop(&local_type); let src_idx = src.index(); macro_rules! make_local_set { - ($instr:ident, $reg:ident, $operand:ident) => { + ($instr:ident, $operand:ident) => { ( - ProcessedInstr::$instr { + Some(ProcessedInstr::$instr { handler_index: HANDLER_IDX_LOCAL_SET, - dst: Reg::$reg(local_idx), + dst: $operand::Param(local_idx), src1: $operand::Reg(src_idx), src2: None, - }, + }), None, ) }; } match local_type { ValueType::NumType(NumType::I32) => ( - ProcessedInstr::I32Reg { + Some(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_LOCAL_SET, - dst: local_idx, + dst: I32RegOperand::Param(local_idx), src1: I32RegOperand::Reg(src_idx), src2: None, - }, + }), None, ), ValueType::NumType(NumType::I64) => { - make_local_set!(I64Reg, I64, I64RegOperand) + make_local_set!(I64Reg, I64RegOperand) } ValueType::NumType(NumType::F32) => { - make_local_set!(F32Reg, F32, F32RegOperand) + make_local_set!(F32Reg, F32RegOperand) } ValueType::NumType(NumType::F64) => { - make_local_set!(F64Reg, F64, F64RegOperand) + make_local_set!(F64Reg, F64RegOperand) } ValueType::RefType(_) => { ( - ProcessedInstr::RefLocalReg { + Some(ProcessedInstr::RefLocalReg { handler_index: HANDLER_IDX_REF_LOCAL_SET_REG, dst: 0, // unused for set src: src_idx, local_idx, - }, + }), None, ) } @@ -1541,45 +2104,45 @@ fn decode_processed_instrs_and_fixups<'a>( // Peek the top register (don't pop - value stays on stack) let src_idx = allocator.peek(&local_type).unwrap().index(); macro_rules! 
make_local_tee { - ($instr:ident, $reg:ident, $operand:ident) => { + ($instr:ident, $operand:ident) => { ( - ProcessedInstr::$instr { + Some(ProcessedInstr::$instr { handler_index: HANDLER_IDX_LOCAL_SET, // Reuse local.set handler - dst: Reg::$reg(local_idx), + dst: $operand::Param(local_idx), src1: $operand::Reg(src_idx), src2: None, - }, + }), None, ) }; } match local_type { ValueType::NumType(NumType::I32) => ( - ProcessedInstr::I32Reg { + Some(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_LOCAL_SET, - dst: local_idx, + dst: I32RegOperand::Param(local_idx), src1: I32RegOperand::Reg(src_idx), src2: None, - }, + }), None, ), ValueType::NumType(NumType::I64) => { - make_local_tee!(I64Reg, I64, I64RegOperand) + make_local_tee!(I64Reg, I64RegOperand) } ValueType::NumType(NumType::F32) => { - make_local_tee!(F32Reg, F32, F32RegOperand) + make_local_tee!(F32Reg, F32RegOperand) } ValueType::NumType(NumType::F64) => { - make_local_tee!(F64Reg, F64, F64RegOperand) + make_local_tee!(F64Reg, F64RegOperand) } ValueType::RefType(_) => { ( - ProcessedInstr::RefLocalReg { + Some(ProcessedInstr::RefLocalReg { handler_index: HANDLER_IDX_REF_LOCAL_SET_REG, dst: 0, // unused for set src: src_idx, local_idx, - }, + }), None, ) } @@ -1592,48 +2155,104 @@ fn decode_processed_instrs_and_fixups<'a>( let global_type = get_global_type(module, *global_index); match global_type { ValueType::NumType(NumType::I32) => { - let dst = allocator.push(global_type); - ( - ProcessedInstr::GlobalGetReg { + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) + { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::GlobalGetReg { handler_index: HANDLER_IDX_GLOBAL_GET_I32, - dst, + dst: RegOrLocal::Local(local_idx), global_index: *global_index, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = 
allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::GlobalGetReg { + handler_index: HANDLER_IDX_GLOBAL_GET_I32, + dst: RegOrLocal::Reg(dst.index()), + global_index: *global_index, + }), + None, + ) + } } ValueType::NumType(NumType::I64) => { - let dst = allocator.push(global_type); - ( - ProcessedInstr::GlobalGetReg { + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) + { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::GlobalGetReg { handler_index: HANDLER_IDX_GLOBAL_GET_I64, - dst, + dst: RegOrLocal::Local(local_idx), global_index: *global_index, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::GlobalGetReg { + handler_index: HANDLER_IDX_GLOBAL_GET_I64, + dst: RegOrLocal::Reg(dst.index()), + global_index: *global_index, + }), + None, + ) + } } ValueType::NumType(NumType::F32) => { - let dst = allocator.push(global_type); - ( - ProcessedInstr::GlobalGetReg { + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) + { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::GlobalGetReg { handler_index: HANDLER_IDX_GLOBAL_GET_F32, - dst, + dst: RegOrLocal::Local(local_idx), global_index: *global_index, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::GlobalGetReg { + handler_index: HANDLER_IDX_GLOBAL_GET_F32, + dst: RegOrLocal::Reg(dst.index()), + global_index: *global_index, + }), + None, + ) + } } ValueType::NumType(NumType::F64) => { - let dst = allocator.push(global_type); - ( - 
ProcessedInstr::GlobalGetReg { + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) + { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::GlobalGetReg { handler_index: HANDLER_IDX_GLOBAL_GET_F64, - dst, + dst: RegOrLocal::Local(local_idx), global_index: *global_index, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::GlobalGetReg { + handler_index: HANDLER_IDX_GLOBAL_GET_F64, + dst: RegOrLocal::Reg(dst.index()), + global_index: *global_index, + }), + None, + ) + } } _ => { panic!("Unsupported type for GlobalGet: {:?}", global_type); @@ -1644,46 +2263,66 @@ fn decode_processed_instrs_and_fixups<'a>( let global_type = get_global_type(module, *global_index); match global_type { ValueType::NumType(NumType::I32) => { - let src = allocator.pop(&global_type); + let src_reg = allocator.pop(&global_type); + let operand = take_i32_operand(&mut pending_operands, src_reg.index()); + let src = match operand { + I32RegOperand::Param(idx) => RegOrLocal::Local(idx), + _ => RegOrLocal::Reg(src_reg.index()), + }; ( - ProcessedInstr::GlobalSetReg { + Some(ProcessedInstr::GlobalSetReg { handler_index: HANDLER_IDX_GLOBAL_SET_I32, src, global_index: *global_index, - }, + }), None, ) } ValueType::NumType(NumType::I64) => { - let src = allocator.pop(&global_type); + let src_reg = allocator.pop(&global_type); + let operand = take_i64_operand(&mut pending_operands, src_reg.index()); + let src = match operand { + I64RegOperand::Param(idx) => RegOrLocal::Local(idx), + _ => RegOrLocal::Reg(src_reg.index()), + }; ( - ProcessedInstr::GlobalSetReg { + Some(ProcessedInstr::GlobalSetReg { handler_index: HANDLER_IDX_GLOBAL_SET_I64, src, global_index: *global_index, - }, + }), None, ) } ValueType::NumType(NumType::F32) => { - let 
src = allocator.pop(&global_type); + let src_reg = allocator.pop(&global_type); + let operand = take_f32_operand(&mut pending_operands, src_reg.index()); + let src = match operand { + F32RegOperand::Param(idx) => RegOrLocal::Local(idx), + _ => RegOrLocal::Reg(src_reg.index()), + }; ( - ProcessedInstr::GlobalSetReg { + Some(ProcessedInstr::GlobalSetReg { handler_index: HANDLER_IDX_GLOBAL_SET_F32, src, global_index: *global_index, - }, + }), None, ) } ValueType::NumType(NumType::F64) => { - let src = allocator.pop(&global_type); + let src_reg = allocator.pop(&global_type); + let operand = take_f64_operand(&mut pending_operands, src_reg.index()); + let src = match operand { + F64RegOperand::Param(idx) => RegOrLocal::Local(idx), + _ => RegOrLocal::Reg(src_reg.index()), + }; ( - ProcessedInstr::GlobalSetReg { + Some(ProcessedInstr::GlobalSetReg { handler_index: HANDLER_IDX_GLOBAL_SET_F64, src, global_index: *global_index, - }, + }), None, ) } @@ -1693,1483 +2332,2816 @@ fn decode_processed_instrs_and_fixups<'a>( } } wasmparser::Operator::I32Const { value } => { - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { - handler_index: HANDLER_IDX_I32_CONST, - dst: dst.index(), - src1: I32RegOperand::Const(*value), - src2: None, - }, - None, - ) + if can_fold_i32(&mut ops) + || can_fold_for_load(&mut ops) + || can_fold_for_store(&mut ops) + { + pending_operands.push(PendingOperand::I32Const(*value)); + allocator.push(ValueType::NumType(NumType::I32)); + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_CONST, + dst: I32RegOperand::Reg(dst.index()), + src1: I32RegOperand::Const(*value), + src2: None, + }), + None, + ) + } } // Binary operations - macro to reduce repetition wasmparser::Operator::I32Add => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - 
let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + // Check if next instruction is local.set with I32 type + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + // Consume the local.set instruction + let _ = ops.next(); + // Push and immediately pop to maintain stack consistency + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + // Insert the instruction that writes directly to local + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_ADD, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + // Insert NopReg for the consumed local.set + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_ADD, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32Sub => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = 
try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_SUB, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_SUB, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32Mul => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_MUL, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_MUL, + 
dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32DivS => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_DIV_S, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_DIV_S, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32DivU => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut 
ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_DIV_U, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_DIV_U, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32RemS => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_REM_S, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_REM_S, + dst: 
I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32RemU => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_REM_U, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_REM_U, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32And => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, 
param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_AND, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_AND, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32Or => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_OR, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_OR, + dst: 
I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32Xor => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_XOR, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_XOR, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32Shl => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, 
param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_SHL, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_SHL, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32ShrS => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_SHR_S, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_SHR_S, + dst: 
I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32ShrU => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_SHR_U, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_SHR_U, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32Rotl => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, 
param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_ROTL, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_ROTL, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32Rotr => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_ROTR, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_ROTR, + dst: 
I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } // Comparison operations wasmparser::Operator::I32Eq => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_EQ, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_EQ, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32Ne => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = 
try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_NE, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_NE, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32LtS => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_LT_S, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_LT_S, + 
dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32LtU => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_LT_U, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_LT_U, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32LeS => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, 
param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_LE_S, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_LE_S, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32LeU => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_LE_U, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_LE_U, + dst: 
I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32GtS => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_GT_S, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_GT_S, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32GtU => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, 
param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_GT_U, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_GT_U, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32GeS => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_GE_S, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_GE_S, + dst: 
I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I32GeU => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src2 = take_i32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_GE_U, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), - src2: Some(I32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_GE_U, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } // Unary operations wasmparser::Operator::I32Clz => { - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + 
initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_CLZ, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), + dst: I32RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_CLZ, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::I32Ctz => { - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_CTZ, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), + dst: I32RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_CTZ, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::I32Popcnt => { - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = 
ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_POPCNT, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), + dst: I32RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_POPCNT, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::I32Eqz => { - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_EQZ, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), + dst: I32RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_EQZ, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::I32Extend8S => { - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1 = 
take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_EXTEND8_S, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), + dst: I32RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_EXTEND8_S, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::I32Extend16S => { - let src1 = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::I32Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let src1 = take_i32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::I32Reg { handler_index: HANDLER_IDX_I32_EXTEND16_S, - dst: dst.index(), - src1: I32RegOperand::Reg(src1.index()), + dst: I32RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::I32Reg { + handler_index: HANDLER_IDX_I32_EXTEND16_S, + dst: I32RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } // ============================================================================ // I64 Register-based instructions // 
============================================================================ wasmparser::Operator::I64Const { value } => { - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { - handler_index: HANDLER_IDX_I64_CONST, - dst, - src1: I64RegOperand::Const(*value), - src2: None, - }, - None, - ) + if can_fold_i64(&mut ops) { + pending_operands.push(PendingOperand::I64Const(*value)); + allocator.push(ValueType::NumType(NumType::I64)); + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_CONST, + dst: I64RegOperand::Reg(dst.index()), + src1: I64RegOperand::Const(*value), + src2: None, + }), + None, + ) + } } // I64 Binary arithmetic operations wasmparser::Operator::I64Add => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_ADD, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: 
HANDLER_IDX_I64_ADD, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I64Sub => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_SUB, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_SUB, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I64Mul => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = 
try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_MUL, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_MUL, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I64DivS => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_DIV_S, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_DIV_S, + dst: 
I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I64DivU => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_DIV_U, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_DIV_U, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I64RemS => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, 
locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_REM_S, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_REM_S, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I64RemU => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_REM_U, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_REM_U, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: 
Some(src2), + }), + None, + ) + } } // I64 Binary bitwise operations wasmparser::Operator::I64And => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_AND, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_AND, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I64Or => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = 
ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_OR, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_OR, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I64Xor => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_XOR, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_XOR, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } 
wasmparser::Operator::I64Shl => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_SHL, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_SHL, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I64ShrS => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = 
allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_SHR_S, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_SHR_S, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I64ShrU => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_SHR_U, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_SHR_U, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } 
wasmparser::Operator::I64Rotl => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_ROTL, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_ROTL, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::I64Rotr => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = 
allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_ROTR, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: I64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_ROTR, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } // I64 Unary operations wasmparser::Operator::I64Clz => { - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_CLZ, - dst, - src1: I64RegOperand::Reg(src1.index()), + dst: I64RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_CLZ, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::I64Ctz => { - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let 
src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_CTZ, - dst, - src1: I64RegOperand::Reg(src1.index()), + dst: I64RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_CTZ, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::I64Popcnt => { - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_POPCNT, - dst, - src1: I64RegOperand::Reg(src1.index()), + dst: I64RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_POPCNT, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::I64Extend8S => { - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); 
- ( - ProcessedInstr::I64Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_EXTEND8_S, - dst, - src1: I64RegOperand::Reg(src1.index()), + dst: I64RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_EXTEND8_S, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::I64Extend16S => { - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_EXTEND16_S, - dst, - src1: I64RegOperand::Reg(src1.index()), + dst: I64RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_EXTEND16_S, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::I64Extend32S 
=> { - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::I64Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_EXTEND32_S, - dst, - src1: I64RegOperand::Reg(src1.index()), + dst: I64RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::I64Reg { + handler_index: HANDLER_IDX_I64_EXTEND32_S, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } // I64 Comparison operations (return i32) wasmparser::Operator::I64Eqz => { - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::I64Reg { + Some(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_EQZ, - dst, - src1: I64RegOperand::Reg(src1.index()), + dst: I64RegOperand::Reg(dst.index()), + src1, src2: None, - }, + }), None, ) } wasmparser::Operator::I64Eq => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut 
pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::I64Reg { + Some(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_EQ, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::I64Ne => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::I64Reg { + Some(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_NE, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::I64LtS => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::I64Reg { + Some(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_LT_S, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::I64LtU => { - let src2 = 
allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::I64Reg { + Some(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_LT_U, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::I64GtS => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::I64Reg { + Some(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_GT_S, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::I64GtU => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); let dst = 
allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::I64Reg { + Some(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_GT_U, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::I64LeS => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::I64Reg { + Some(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_LE_S, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::I64LeU => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::I64Reg { + Some(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_LE_U, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::I64GeS => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - 
let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::I64Reg { + Some(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_GE_S, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::I64GeU => { - let src2 = allocator.pop(&ValueType::NumType(NumType::I64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::I64)); + let src2 = take_i64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_i64_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::I64Reg { + Some(ProcessedInstr::I64Reg { handler_index: HANDLER_IDX_I64_GE_U, - dst, - src1: I64RegOperand::Reg(src1.index()), - src2: Some(I64RegOperand::Reg(src2.index())), - }, + dst: I64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } // F32 Const wasmparser::Operator::F32Const { value } => { - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::F32Reg { - handler_index: HANDLER_IDX_F32_CONST, - dst, - src1: F32RegOperand::Const(f32::from_bits(value.bits())), - src2: None, - }, - None, - ) + if can_fold_f32(&mut ops) { + pending_operands + .push(PendingOperand::F32Const(f32::from_bits(value.bits()))); + allocator.push(ValueType::NumType(NumType::F32)); + (None, None) + } else { + let dst = 
allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::F32Reg { + handler_index: HANDLER_IDX_F32_CONST, + dst: F32RegOperand::Reg(dst.index()), + src1: F32RegOperand::Const(f32::from_bits(value.bits())), + src2: None, + }), + None, + ) + } } // F32 Binary arithmetic operations wasmparser::Operator::F32Add => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::F32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2 = take_f32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_ADD, - dst, - src1: F32RegOperand::Reg(src1.index()), - src2: Some(F32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: F32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::F32Reg { + handler_index: HANDLER_IDX_F32_ADD, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::F32Sub => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::F32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + 
let src2 = take_f32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_SUB, - dst, - src1: F32RegOperand::Reg(src1.index()), - src2: Some(F32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: F32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::F32Reg { + handler_index: HANDLER_IDX_F32_SUB, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::F32Mul => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::F32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2 = take_f32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_MUL, - dst, - src1: F32RegOperand::Reg(src1.index()), - src2: Some(F32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: F32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else 
{ + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::F32Reg { + handler_index: HANDLER_IDX_F32_MUL, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::F32Div => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::F32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2 = take_f32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_DIV, - dst, - src1: F32RegOperand::Reg(src1.index()), - src2: Some(F32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: F32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::F32Reg { + handler_index: HANDLER_IDX_F32_DIV, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::F32Min => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::F32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2 = take_f32_operand(&mut pending_operands, src2_reg.index()); + 
let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_MIN, - dst, - src1: F32RegOperand::Reg(src1.index()), - src2: Some(F32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: F32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::F32Reg { + handler_index: HANDLER_IDX_F32_MIN, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::F32Max => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::F32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2 = take_f32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_MAX, - dst, - src1: F32RegOperand::Reg(src1.index()), - src2: Some(F32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: F32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + 
Some(ProcessedInstr::F32Reg { + handler_index: HANDLER_IDX_F32_MAX, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::F32Copysign => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::F32Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2 = take_f32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_COPYSIGN, - dst, - src1: F32RegOperand::Reg(src1.index()), - src2: Some(F32RegOperand::Reg(src2.index())), - }, - None, - ) + dst: F32RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::F32Reg { + handler_index: HANDLER_IDX_F32_COPYSIGN, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } // F32 Unary operations wasmparser::Operator::F32Abs => { - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::F32Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); 
+ allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_ABS, - dst, - src1: F32RegOperand::Reg(src1.index()), + dst: F32RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::F32Reg { + handler_index: HANDLER_IDX_F32_ABS, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::F32Neg => { - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::F32Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_NEG, - dst, - src1: F32RegOperand::Reg(src1.index()), + dst: F32RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::F32Reg { + handler_index: HANDLER_IDX_F32_NEG, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::F32Ceil => { - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::F32Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, 
locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_CEIL, - dst, - src1: F32RegOperand::Reg(src1.index()), + dst: F32RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::F32Reg { + handler_index: HANDLER_IDX_F32_CEIL, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::F32Floor => { - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::F32Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_FLOOR, - dst, - src1: F32RegOperand::Reg(src1.index()), + dst: F32RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::F32Reg { + handler_index: HANDLER_IDX_F32_FLOOR, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::F32Trunc => { - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::F32Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1 = take_f32_operand(&mut 
pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_TRUNC, - dst, - src1: F32RegOperand::Reg(src1.index()), + dst: F32RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::F32Reg { + handler_index: HANDLER_IDX_F32_TRUNC, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::F32Nearest => { - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::F32Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_NEAREST, - dst, - src1: F32RegOperand::Reg(src1.index()), + dst: F32RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::F32Reg { + handler_index: HANDLER_IDX_F32_NEAREST, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::F32Sqrt => { - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - 
ProcessedInstr::F32Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_SQRT, - dst, - src1: F32RegOperand::Reg(src1.index()), + dst: F32RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::F32Reg { + handler_index: HANDLER_IDX_F32_SQRT, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } // F32 Comparison operations (return i32) wasmparser::Operator::F32Eq => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2 = take_f32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::F32Reg { + Some(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_EQ, - dst, - src1: F32RegOperand::Reg(src1.index()), - src2: Some(F32RegOperand::Reg(src2.index())), - }, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::F32Ne => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + 
let src2 = take_f32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::F32Reg { + Some(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_NE, - dst, - src1: F32RegOperand::Reg(src1.index()), - src2: Some(F32RegOperand::Reg(src2.index())), - }, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::F32Lt => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2 = take_f32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::F32Reg { + Some(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_LT, - dst, - src1: F32RegOperand::Reg(src1.index()), - src2: Some(F32RegOperand::Reg(src2.index())), - }, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::F32Gt => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2 = take_f32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::F32Reg { + Some(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_GT, - dst, - src1: F32RegOperand::Reg(src1.index()), - src2: Some(F32RegOperand::Reg(src2.index())), - }, + dst: F32RegOperand::Reg(dst.index()), + 
src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::F32Le => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2 = take_f32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::F32Reg { + Some(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_LE, - dst, - src1: F32RegOperand::Reg(src1.index()), - src2: Some(F32RegOperand::Reg(src2.index())), - }, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::F32Ge => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F32)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F32)); + let src2 = take_f32_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f32_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::F32Reg { + Some(ProcessedInstr::F32Reg { handler_index: HANDLER_IDX_F32_GE, - dst, - src1: F32RegOperand::Reg(src1.index()), - src2: Some(F32RegOperand::Reg(src2.index())), - }, + dst: F32RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } // F64 Const wasmparser::Operator::F64Const { value } => { - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::F64Reg { - handler_index: HANDLER_IDX_F64_CONST, - dst, - src1: F64RegOperand::Const(f64::from_bits(value.bits())), - src2: None, - }, - None, - ) + if can_fold_f64(&mut ops) { + pending_operands + 
.push(PendingOperand::F64Const(f64::from_bits(value.bits()))); + allocator.push(ValueType::NumType(NumType::F64)); + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::F64Reg { + handler_index: HANDLER_IDX_F64_CONST, + dst: F64RegOperand::Reg(dst.index()), + src1: F64RegOperand::Const(f64::from_bits(value.bits())), + src2: None, + }), + None, + ) + } } // F64 Binary arithmetic operations wasmparser::Operator::F64Add => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::F64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2 = take_f64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_ADD, - dst, - src1: F64RegOperand::Reg(src1.index()), - src2: Some(F64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: F64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::F64Reg { + handler_index: HANDLER_IDX_F64_ADD, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::F64Sub => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - 
ProcessedInstr::F64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2 = take_f64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_SUB, - dst, - src1: F64RegOperand::Reg(src1.index()), - src2: Some(F64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: F64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::F64Reg { + handler_index: HANDLER_IDX_F64_SUB, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::F64Mul => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::F64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2 = take_f64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_MUL, - dst, - src1: F64RegOperand::Reg(src1.index()), - src2: 
Some(F64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: F64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::F64Reg { + handler_index: HANDLER_IDX_F64_MUL, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::F64Div => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::F64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2 = take_f64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_DIV, - dst, - src1: F64RegOperand::Reg(src1.index()), - src2: Some(F64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: F64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::F64Reg { + handler_index: HANDLER_IDX_F64_DIV, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::F64Min => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::F64Reg { + let src2_reg = 
allocator.pop(&ValueType::NumType(NumType::F64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2 = take_f64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_MIN, - dst, - src1: F64RegOperand::Reg(src1.index()), - src2: Some(F64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: F64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::F64Reg { + handler_index: HANDLER_IDX_F64_MIN, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::F64Max => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::F64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2 = take_f64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_MAX, - dst, - src1: F64RegOperand::Reg(src1.index()), - src2: Some(F64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: 
F64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::F64Reg { + handler_index: HANDLER_IDX_F64_MAX, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } wasmparser::Operator::F64Copysign => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::F64Reg { + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2 = take_f64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_COPYSIGN, - dst, - src1: F64RegOperand::Reg(src1.index()), - src2: Some(F64RegOperand::Reg(src2.index())), - }, - None, - ) + dst: F64RegOperand::Param(local_idx), + src1, + src2: Some(src2), + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::F64Reg { + handler_index: HANDLER_IDX_F64_COPYSIGN, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), + None, + ) + } } // F64 Unary operations wasmparser::Operator::F64Abs => { - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::F64Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1 = take_f64_operand(&mut 
pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_ABS, - dst, - src1: F64RegOperand::Reg(src1.index()), + dst: F64RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::F64Reg { + handler_index: HANDLER_IDX_F64_ABS, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::F64Neg => { - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::F64Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_NEG, - dst, - src1: F64RegOperand::Reg(src1.index()), + dst: F64RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::F64Reg { + handler_index: HANDLER_IDX_F64_NEG, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::F64Ceil => { - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::F64Reg { + let 
src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_CEIL, - dst, - src1: F64RegOperand::Reg(src1.index()), + dst: F64RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::F64Reg { + handler_index: HANDLER_IDX_F64_CEIL, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::F64Floor => { - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::F64Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_FLOOR, - dst, - src1: F64RegOperand::Reg(src1.index()), + dst: F64RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::F64Reg { + handler_index: HANDLER_IDX_F64_FLOOR, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::F64Trunc => { - let src1 = 
allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::F64Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_TRUNC, - dst, - src1: F64RegOperand::Reg(src1.index()), + dst: F64RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::F64Reg { + handler_index: HANDLER_IDX_F64_TRUNC, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::F64Nearest => { - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::F64Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_NEAREST, - dst, - src1: F64RegOperand::Reg(src1.index()), + dst: F64RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::F64Reg { + handler_index: HANDLER_IDX_F64_NEAREST, + dst: 
F64RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } wasmparser::Operator::F64Sqrt => { - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::F64Reg { + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_SQRT, - dst, - src1: F64RegOperand::Reg(src1.index()), + dst: F64RegOperand::Param(local_idx), + src1, src2: None, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::F64Reg { + handler_index: HANDLER_IDX_F64_SQRT, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: None, + }), + None, + ) + } } // F64 Comparison operations (return i32) wasmparser::Operator::F64Eq => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2 = take_f64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::F64Reg { + Some(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_EQ, - dst, - src1: F64RegOperand::Reg(src1.index()), - src2: Some(F64RegOperand::Reg(src2.index())), - }, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::F64Ne => { - let src2 = 
allocator.pop(&ValueType::NumType(NumType::F64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2 = take_f64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::F64Reg { + Some(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_NE, - dst, - src1: F64RegOperand::Reg(src1.index()), - src2: Some(F64RegOperand::Reg(src2.index())), - }, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::F64Lt => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2 = take_f64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::F64Reg { + Some(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_LT, - dst, - src1: F64RegOperand::Reg(src1.index()), - src2: Some(F64RegOperand::Reg(src2.index())), - }, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::F64Gt => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2 = take_f64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); let dst = 
allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::F64Reg { + Some(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_GT, - dst, - src1: F64RegOperand::Reg(src1.index()), - src2: Some(F64RegOperand::Reg(src2.index())), - }, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::F64Le => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2 = take_f64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::F64Reg { + Some(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_LE, - dst, - src1: F64RegOperand::Reg(src1.index()), - src2: Some(F64RegOperand::Reg(src2.index())), - }, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } wasmparser::Operator::F64Ge => { - let src2 = allocator.pop(&ValueType::NumType(NumType::F64)); - let src1 = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src1_reg = allocator.pop(&ValueType::NumType(NumType::F64)); + let src2 = take_f64_operand(&mut pending_operands, src2_reg.index()); + let src1 = take_f64_operand(&mut pending_operands, src1_reg.index()); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::F64Reg { + Some(ProcessedInstr::F64Reg { handler_index: HANDLER_IDX_F64_GE, - dst, - src1: F64RegOperand::Reg(src1.index()), - src2: Some(F64RegOperand::Reg(src2.index())), - }, + dst: F64RegOperand::Reg(dst.index()), + src1, + src2: Some(src2), + }), None, ) } @@ -3212,10 +5184,10 @@ fn decode_processed_instrs_and_fixups<'a>( // Use EndReg for register-based execution 
let instr = ProcessedInstr::EndReg { - source_regs, - target_result_regs, + source_regs: source_regs.into_boxed_slice(), + target_result_regs: target_result_regs.into_boxed_slice(), }; - (instr, None) + (Some(instr), None) } wasmparser::Operator::Block { blockty } | wasmparser::Operator::Loop { blockty } => { @@ -3243,7 +5215,7 @@ fn decode_processed_instrs_and_fixups<'a>( // Push params back - they're still on the stack inside the block for vtype in param_types.iter() { - allocator.push(vtype.clone()); + allocator.push(*vtype); } let arity = result_types.len(); @@ -3253,8 +5225,8 @@ fn decode_processed_instrs_and_fixups<'a>( control_info_stack.push(ControlBlockInfo { block_type: *blockty, is_loop, - result_regs, - param_regs: vec![], + result_regs: result_regs.into(), + param_regs: Vec::new(), }); let instr = ProcessedInstr::BlockReg { @@ -3262,7 +5234,7 @@ fn decode_processed_instrs_and_fixups<'a>( param_count, is_loop, }; - (instr, None) + (Some(instr), None) } wasmparser::Operator::If { blockty } => { let cond_reg = allocator.pop(&ValueType::NumType(NumType::I32)); @@ -3287,7 +5259,7 @@ fn decode_processed_instrs_and_fixups<'a>( }; for vtype in param_types.iter() { - allocator.push(vtype.clone()); + allocator.push(*vtype); } let arity = result_types.len(); @@ -3296,8 +5268,8 @@ fn decode_processed_instrs_and_fixups<'a>( control_info_stack.push(ControlBlockInfo { block_type: *blockty, is_loop: false, - result_regs, - param_regs: vec![], + result_regs: result_regs.into(), + param_regs: Vec::new(), }); let instr = ProcessedInstr::IfReg { @@ -3311,9 +5283,9 @@ fn decode_processed_instrs_and_fixups<'a>( original_wasm_depth: 0, is_if_false_jump: true, is_else_jump: false, - source_regs: vec![], + source_regs: Vec::new(), }); - (instr, fixup) + (Some(instr), fixup) } wasmparser::Operator::Else => { if let Some(state) = allocator_state_stack.last() { @@ -3324,7 +5296,7 @@ fn decode_processed_instrs_and_fixups<'a>( if let Some(block_info) = control_info_stack.last() { 
let param_types = get_block_param_types(&block_info.block_type, module); for vtype in param_types.iter() { - allocator.push(vtype.clone()); + allocator.push(*vtype); } } @@ -3337,9 +5309,9 @@ fn decode_processed_instrs_and_fixups<'a>( original_wasm_depth: 0, is_if_false_jump: false, is_else_jump: true, - source_regs: vec![], + source_regs: Vec::new(), }); - (instr, fixup) + (Some(instr), fixup) } wasmparser::Operator::Call { function_index } => { @@ -3371,17 +5343,17 @@ fn decode_processed_instrs_and_fixups<'a>( } let result_reg = if let Some(result_type) = result_types.first() { - Some(allocator.push(result_type.clone())) + Some(allocator.push(*result_type)) } else { None }; ( - ProcessedInstr::CallWasiReg { + Some(ProcessedInstr::CallWasiReg { wasi_func_type: wasi_type, - param_regs, + param_regs: param_regs.into_boxed_slice(), result_reg, - }, + }), None, ) } else { @@ -3420,11 +5392,11 @@ fn decode_processed_instrs_and_fixups<'a>( if param_types.is_empty() && result_types.is_empty() { // No params/results - still use CallReg with empty registers ( - ProcessedInstr::CallReg { + Some(ProcessedInstr::CallReg { func_idx: FuncIdx(*function_index), - param_regs: vec![], - result_regs: vec![], - }, + param_regs: Box::new([]), + result_regs: Box::new([]), + }), None, ) } else { @@ -3440,17 +5412,17 @@ fn decode_processed_instrs_and_fixups<'a>( // Push result types to allocator and collect result_regs let mut result_regs = Vec::new(); for result_type in &result_types { - let reg = allocator.push(result_type.clone()); + let reg = allocator.push(*result_type); result_regs.push(reg); } // Use CallReg for register-based execution let instr = ProcessedInstr::CallReg { func_idx: FuncIdx(*function_index), - param_regs, - result_regs, + param_regs: param_regs.into_boxed_slice(), + result_regs: result_regs.into_boxed_slice(), }; - (instr, None) + (Some(instr), None) } } } @@ -3474,8 +5446,7 @@ fn decode_processed_instrs_and_fixups<'a>( // Get param registers (in order, from 
bottom to top) // We need to collect the param registers before popping them - let param_count = param_types.len(); - let mut param_regs = Vec::with_capacity(param_count); + let mut param_regs = Vec::new(); for param_type in param_types.iter() { if let Some(reg) = allocator.peek(param_type) { param_regs.push(reg); @@ -3494,7 +5465,7 @@ fn decode_processed_instrs_and_fixups<'a>( // Push result types to allocator and collect result_regs let mut result_regs = Vec::new(); for result_type in &result_types_vec { - let reg = allocator.push(result_type.clone()); + let reg = allocator.push(*result_type); result_regs.push(reg); } @@ -3503,10 +5474,10 @@ fn decode_processed_instrs_and_fixups<'a>( type_idx: TypeIdx(*type_index), table_idx: TableIdx(*table_index), index_reg, - param_regs, - result_regs, + param_regs: param_regs.into_boxed_slice(), + result_regs: result_regs.into_boxed_slice(), }; - (instr, None) + (Some(instr), None) } wasmparser::Operator::Br { relative_depth } => { @@ -3520,8 +5491,8 @@ fn decode_processed_instrs_and_fixups<'a>( let instr = ProcessedInstr::BrReg { relative_depth: *relative_depth, target_ip: usize::MAX, // Will be set by fixup - source_regs: source_regs.clone(), - target_result_regs, + source_regs: source_regs.clone().into_boxed_slice(), + target_result_regs: target_result_regs.into_boxed_slice(), }; let fixup = FixupInfo { pc: current_processed_pc, @@ -3530,7 +5501,7 @@ fn decode_processed_instrs_and_fixups<'a>( is_else_jump: false, source_regs, }; - (instr, Some(fixup)) + (Some(instr), Some(fixup)) } wasmparser::Operator::BrIf { relative_depth } => { // Pop condition register @@ -3551,8 +5522,8 @@ fn decode_processed_instrs_and_fixups<'a>( relative_depth: *relative_depth, target_ip: usize::MAX, // Will be set by fixup cond_reg, - source_regs: source_regs.clone(), - target_result_regs, + source_regs: source_regs.clone().into_boxed_slice(), + target_result_regs: target_result_regs.into_boxed_slice(), }; let fixup = FixupInfo { pc: 
current_processed_pc, @@ -3561,7 +5532,7 @@ fn decode_processed_instrs_and_fixups<'a>( is_else_jump: false, source_regs, }; - (instr, Some(fixup)) + (Some(instr), Some(fixup)) } wasmparser::Operator::BrTable { ref targets } => { // Pop index register @@ -3575,7 +5546,7 @@ fn decode_processed_instrs_and_fixups<'a>( let target_depths: Vec = targets.targets().collect::, _>>()?; - let mut table_targets: Vec<(u32, usize, Vec)> = + let mut table_targets: Vec<(u32, usize, RegSlice)> = Vec::with_capacity(target_depths.len()); for depth in target_depths.iter() { let (_, target_result_regs) = compute_branch_regs( @@ -3583,7 +5554,11 @@ fn decode_processed_instrs_and_fixups<'a>( *depth as usize, reg_allocator.as_ref(), ); - table_targets.push((*depth, usize::MAX, target_result_regs)); + table_targets.push(( + *depth, + usize::MAX, + target_result_regs.into_boxed_slice(), + )); // target_ip will be set by fixup } @@ -3593,13 +5568,17 @@ fn decode_processed_instrs_and_fixups<'a>( targets.default() as usize, reg_allocator.as_ref(), ); - let default_target = (targets.default(), usize::MAX, default_result_regs); // target_ip will be set by fixup + let default_target = ( + targets.default(), + usize::MAX, + default_result_regs.into_boxed_slice(), + ); // target_ip will be set by fixup let instr = ProcessedInstr::BrTableReg { targets: table_targets.clone(), default_target, index_reg, - source_regs: source_regs.clone(), + source_regs: source_regs.clone().into_boxed_slice(), }; // Create fixups for each target and default @@ -3611,7 +5590,7 @@ fn decode_processed_instrs_and_fixups<'a>( is_else_jump: false, source_regs, }; - (instr, Some(fixup)) + (Some(instr), Some(fixup)) } wasmparser::Operator::Return => { // Get result registers based on function result types @@ -3620,711 +5599,1361 @@ fn decode_processed_instrs_and_fixups<'a>( allocator.pop(result_type); } - let instr = ProcessedInstr::ReturnReg { result_regs }; - (instr, None) + let instr = ProcessedInstr::ReturnReg { + 
result_regs: result_regs.into_boxed_slice(), + }; + (Some(instr), None) } - wasmparser::Operator::Nop => (ProcessedInstr::NopReg, None), - wasmparser::Operator::Unreachable => (ProcessedInstr::UnreachableReg, None), + wasmparser::Operator::Nop => (None, None), + wasmparser::Operator::Unreachable => (Some(ProcessedInstr::UnreachableReg), None), wasmparser::Operator::Drop => { // Pop from type_stack to keep it in sync, but no runtime operation needed allocator.pop_any(); - (ProcessedInstr::NopReg, None) + (None, None) } // Conversion instructions - use ConversionReg wasmparser::Operator::I64ExtendI32S => { let src = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I64_EXTEND_I32_S, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I64_EXTEND_I32_S, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I64ExtendI32U => { let src = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I64_EXTEND_I32_U, - dst, + dst: 
RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I64_EXTEND_I32_U, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I32WrapI64 => { let src = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I32_WRAP_I64, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I32_WRAP_I64, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I32TruncF32S => { let src = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I32_TRUNC_F32_S, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I32_TRUNC_F32_S, + dst: 
RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I32TruncF32U => { let src = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I32_TRUNC_F32_U, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I32_TRUNC_F32_U, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I32TruncF64S => { let src = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I32_TRUNC_F64_S, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I32_TRUNC_F64_S, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I32TruncF64U => { let src = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::ConversionReg { + 
if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I32_TRUNC_F64_U, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I32_TRUNC_F64_U, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I64TruncF32S => { let src = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I64_TRUNC_F32_S, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I64_TRUNC_F32_S, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I64TruncF32U => { let src = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + 
initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I64_TRUNC_F32_U, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I64_TRUNC_F32_U, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I64TruncF64S => { let src = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I64_TRUNC_F64_S, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I64_TRUNC_F64_S, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I64TruncF64U => { let src = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I64_TRUNC_F64_U, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = 
allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I64_TRUNC_F64_U, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I32TruncSatF32S => { let src = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I32_TRUNC_SAT_F32_S, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I32_TRUNC_SAT_F32_S, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I32TruncSatF32U => { let src = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I32_TRUNC_SAT_F32_U, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I32_TRUNC_SAT_F32_U, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } 
wasmparser::Operator::I32TruncSatF64S => { let src = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I32_TRUNC_SAT_F64_S, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I32_TRUNC_SAT_F64_S, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I32TruncSatF64U => { let src = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I32_TRUNC_SAT_F64_U, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I32_TRUNC_SAT_F64_U, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I64TruncSatF32S => { let src = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = 
try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I64_TRUNC_SAT_F32_S, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I64_TRUNC_SAT_F32_S, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I64TruncSatF32U => { let src = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I64_TRUNC_SAT_F32_U, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I64_TRUNC_SAT_F32_U, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I64TruncSatF64S => { let src = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + 
initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I64_TRUNC_SAT_F64_S, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I64_TRUNC_SAT_F64_S, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I64TruncSatF64U => { let src = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I64_TRUNC_SAT_F64_U, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I64_TRUNC_SAT_F64_U, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::F32ConvertI32S => { let src = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_F32_CONVERT_I32_S, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let 
dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_F32_CONVERT_I32_S, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::F32ConvertI32U => { let src = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_F32_CONVERT_I32_U, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_F32_CONVERT_I32_U, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::F32ConvertI64S => { let src = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_F32_CONVERT_I64_S, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_F32_CONVERT_I64_S, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::F32ConvertI64U 
=> { let src = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_F32_CONVERT_I64_U, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_F32_CONVERT_I64_U, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::F64ConvertI32S => { let src = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_F64_CONVERT_I32_S, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_F64_CONVERT_I32_S, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::F64ConvertI32U => { let src = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = 
ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_F64_CONVERT_I32_U, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_F64_CONVERT_I32_U, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::F64ConvertI64S => { let src = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_F64_CONVERT_I64_S, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_F64_CONVERT_I64_S, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::F64ConvertI64U => { let src = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_F64_CONVERT_I64_U, - dst, + dst: 
RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_F64_CONVERT_I64_U, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::F32DemoteF64 => { let src = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_F32_DEMOTE_F64, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_F32_DEMOTE_F64, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::F64PromoteF32 => { let src = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_F64_PROMOTE_F32, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: 
HANDLER_IDX_F64_PROMOTE_F32, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I32ReinterpretF32 => { let src = allocator.pop(&ValueType::NumType(NumType::F32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I32_REINTERPRET_F32, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I32_REINTERPRET_F32, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::I64ReinterpretF64 => { let src = allocator.pop(&ValueType::NumType(NumType::F64)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_I64_REINTERPRET_F64, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_I64_REINTERPRET_F64, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::F32ReinterpretI32 => { let src = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = 
allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_F32_REINTERPRET_I32, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_F32_REINTERPRET_I32, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } wasmparser::Operator::F64ReinterpretI64 => { let src = allocator.pop(&ValueType::NumType(NumType::I64)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::ConversionReg { + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::ConversionReg { handler_index: HANDLER_IDX_F64_REINTERPRET_I64, - dst, + dst: RegOrLocal::Local(local_idx), src, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::ConversionReg { + handler_index: HANDLER_IDX_F64_REINTERPRET_I64, + dst: RegOrLocal::Reg(dst.index()), + src, + }), + None, + ) + } } // Memory Load instructions wasmparser::Operator::I32Load { memarg } => { - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::MemoryLoadReg { + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut 
pending_operands, addr_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::MemoryLoadReg { handler_index: HANDLER_IDX_I32_LOAD, - dst, + dst: RegOrLocal::Local(local_idx), addr, offset: memarg.offset, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::MemoryLoadReg { + handler_index: HANDLER_IDX_I32_LOAD, + dst: RegOrLocal::Reg(dst.index()), + addr, + offset: memarg.offset, + }), + None, + ) + } } wasmparser::Operator::I64Load { memarg } => { - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::MemoryLoadReg { + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::MemoryLoadReg { handler_index: HANDLER_IDX_I64_LOAD, - dst, + dst: RegOrLocal::Local(local_idx), addr, offset: memarg.offset, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::MemoryLoadReg { + handler_index: HANDLER_IDX_I64_LOAD, + dst: RegOrLocal::Reg(dst.index()), + addr, + offset: memarg.offset, + }), + None, + ) + } } wasmparser::Operator::F32Load { memarg } => { - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::F32)); - ( - ProcessedInstr::MemoryLoadReg 
{ + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); + if let Some(local_idx) = try_fold_dst_f32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F32)); + allocator.pop(&ValueType::NumType(NumType::F32)); + initial_processed_instrs.push(ProcessedInstr::MemoryLoadReg { handler_index: HANDLER_IDX_F32_LOAD, - dst, + dst: RegOrLocal::Local(local_idx), addr, offset: memarg.offset, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F32)); + ( + Some(ProcessedInstr::MemoryLoadReg { + handler_index: HANDLER_IDX_F32_LOAD, + dst: RegOrLocal::Reg(dst.index()), + addr, + offset: memarg.offset, + }), + None, + ) + } } wasmparser::Operator::F64Load { memarg } => { - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::F64)); - ( - ProcessedInstr::MemoryLoadReg { + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); + if let Some(local_idx) = try_fold_dst_f64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::F64)); + allocator.pop(&ValueType::NumType(NumType::F64)); + initial_processed_instrs.push(ProcessedInstr::MemoryLoadReg { handler_index: HANDLER_IDX_F64_LOAD, - dst, + dst: RegOrLocal::Local(local_idx), addr, offset: memarg.offset, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::F64)); + ( + Some(ProcessedInstr::MemoryLoadReg { + handler_index: HANDLER_IDX_F64_LOAD, + dst: RegOrLocal::Reg(dst.index()), + addr, + offset: memarg.offset, + }), + None, + ) + } } wasmparser::Operator::I32Load8S { memarg } => { - let addr = 
allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::MemoryLoadReg { + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::MemoryLoadReg { handler_index: HANDLER_IDX_I32_LOAD8_S, - dst, + dst: RegOrLocal::Local(local_idx), addr, offset: memarg.offset, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::MemoryLoadReg { + handler_index: HANDLER_IDX_I32_LOAD8_S, + dst: RegOrLocal::Reg(dst.index()), + addr, + offset: memarg.offset, + }), + None, + ) + } } wasmparser::Operator::I32Load8U { memarg } => { - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::MemoryLoadReg { + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::MemoryLoadReg { handler_index: HANDLER_IDX_I32_LOAD8_U, - dst, + dst: RegOrLocal::Local(local_idx), addr, offset: memarg.offset, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::MemoryLoadReg { + handler_index: HANDLER_IDX_I32_LOAD8_U, + dst: 
RegOrLocal::Reg(dst.index()), + addr, + offset: memarg.offset, + }), + None, + ) + } } wasmparser::Operator::I32Load16S { memarg } => { - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::MemoryLoadReg { + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::MemoryLoadReg { handler_index: HANDLER_IDX_I32_LOAD16_S, - dst, + dst: RegOrLocal::Local(local_idx), addr, offset: memarg.offset, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::MemoryLoadReg { + handler_index: HANDLER_IDX_I32_LOAD16_S, + dst: RegOrLocal::Reg(dst.index()), + addr, + offset: memarg.offset, + }), + None, + ) + } } wasmparser::Operator::I32Load16U { memarg } => { - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I32)); - ( - ProcessedInstr::MemoryLoadReg { + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); + if let Some(local_idx) = try_fold_dst_i32(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I32)); + allocator.pop(&ValueType::NumType(NumType::I32)); + initial_processed_instrs.push(ProcessedInstr::MemoryLoadReg { handler_index: HANDLER_IDX_I32_LOAD16_U, - dst, + dst: RegOrLocal::Local(local_idx), addr, offset: memarg.offset, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = 
allocator.push(ValueType::NumType(NumType::I32)); + ( + Some(ProcessedInstr::MemoryLoadReg { + handler_index: HANDLER_IDX_I32_LOAD16_U, + dst: RegOrLocal::Reg(dst.index()), + addr, + offset: memarg.offset, + }), + None, + ) + } } wasmparser::Operator::I64Load8S { memarg } => { - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::MemoryLoadReg { + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::MemoryLoadReg { handler_index: HANDLER_IDX_I64_LOAD8_S, - dst, + dst: RegOrLocal::Local(local_idx), addr, offset: memarg.offset, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::MemoryLoadReg { + handler_index: HANDLER_IDX_I64_LOAD8_S, + dst: RegOrLocal::Reg(dst.index()), + addr, + offset: memarg.offset, + }), + None, + ) + } } wasmparser::Operator::I64Load8U { memarg } => { - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::MemoryLoadReg { + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::MemoryLoadReg { handler_index: HANDLER_IDX_I64_LOAD8_U, - dst, + dst: 
RegOrLocal::Local(local_idx), addr, offset: memarg.offset, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::MemoryLoadReg { + handler_index: HANDLER_IDX_I64_LOAD8_U, + dst: RegOrLocal::Reg(dst.index()), + addr, + offset: memarg.offset, + }), + None, + ) + } } wasmparser::Operator::I64Load16S { memarg } => { - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::MemoryLoadReg { + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::MemoryLoadReg { handler_index: HANDLER_IDX_I64_LOAD16_S, - dst, + dst: RegOrLocal::Local(local_idx), addr, offset: memarg.offset, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::MemoryLoadReg { + handler_index: HANDLER_IDX_I64_LOAD16_S, + dst: RegOrLocal::Reg(dst.index()), + addr, + offset: memarg.offset, + }), + None, + ) + } } wasmparser::Operator::I64Load16U { memarg } => { - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::MemoryLoadReg { + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + 
allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::MemoryLoadReg { handler_index: HANDLER_IDX_I64_LOAD16_U, - dst, + dst: RegOrLocal::Local(local_idx), addr, offset: memarg.offset, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::MemoryLoadReg { + handler_index: HANDLER_IDX_I64_LOAD16_U, + dst: RegOrLocal::Reg(dst.index()), + addr, + offset: memarg.offset, + }), + None, + ) + } } wasmparser::Operator::I64Load32S { memarg } => { - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::MemoryLoadReg { + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); + if let Some(local_idx) = try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::MemoryLoadReg { handler_index: HANDLER_IDX_I64_LOAD32_S, - dst, + dst: RegOrLocal::Local(local_idx), addr, offset: memarg.offset, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::MemoryLoadReg { + handler_index: HANDLER_IDX_I64_LOAD32_S, + dst: RegOrLocal::Reg(dst.index()), + addr, + offset: memarg.offset, + }), + None, + ) + } } wasmparser::Operator::I64Load32U { memarg } => { - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ValueType::NumType(NumType::I64)); - ( - ProcessedInstr::MemoryLoadReg { + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); + if let Some(local_idx) = 
try_fold_dst_i64(&mut ops, param_types, locals) { + let _ = ops.next(); + let _dst = allocator.push(ValueType::NumType(NumType::I64)); + allocator.pop(&ValueType::NumType(NumType::I64)); + initial_processed_instrs.push(ProcessedInstr::MemoryLoadReg { handler_index: HANDLER_IDX_I64_LOAD32_U, - dst, + dst: RegOrLocal::Local(local_idx), addr, offset: memarg.offset, - }, - None, - ) + }); + current_processed_pc += 1; + (None, None) + } else { + let dst = allocator.push(ValueType::NumType(NumType::I64)); + ( + Some(ProcessedInstr::MemoryLoadReg { + handler_index: HANDLER_IDX_I64_LOAD32_U, + dst: RegOrLocal::Reg(dst.index()), + addr, + offset: memarg.offset, + }), + None, + ) + } } // Memory Store instructions wasmparser::Operator::I32Store { memarg } => { let value = allocator.pop(&ValueType::NumType(NumType::I32)); - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); ( - ProcessedInstr::MemoryStoreReg { + Some(ProcessedInstr::MemoryStoreReg { handler_index: HANDLER_IDX_I32_STORE, addr, value, offset: memarg.offset, - }, + }), None, ) } wasmparser::Operator::I64Store { memarg } => { let value = allocator.pop(&ValueType::NumType(NumType::I64)); - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); ( - ProcessedInstr::MemoryStoreReg { + Some(ProcessedInstr::MemoryStoreReg { handler_index: HANDLER_IDX_I64_STORE, addr, value, offset: memarg.offset, - }, + }), None, ) } wasmparser::Operator::F32Store { memarg } => { let value = allocator.pop(&ValueType::NumType(NumType::F32)); - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); ( - 
ProcessedInstr::MemoryStoreReg { + Some(ProcessedInstr::MemoryStoreReg { handler_index: HANDLER_IDX_F32_STORE, addr, value, offset: memarg.offset, - }, + }), None, ) } wasmparser::Operator::F64Store { memarg } => { let value = allocator.pop(&ValueType::NumType(NumType::F64)); - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); ( - ProcessedInstr::MemoryStoreReg { + Some(ProcessedInstr::MemoryStoreReg { handler_index: HANDLER_IDX_F64_STORE, addr, value, offset: memarg.offset, - }, + }), None, ) } wasmparser::Operator::I32Store8 { memarg } => { let value = allocator.pop(&ValueType::NumType(NumType::I32)); - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); ( - ProcessedInstr::MemoryStoreReg { + Some(ProcessedInstr::MemoryStoreReg { handler_index: HANDLER_IDX_I32_STORE8, addr, value, offset: memarg.offset, - }, + }), None, ) } wasmparser::Operator::I32Store16 { memarg } => { let value = allocator.pop(&ValueType::NumType(NumType::I32)); - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); ( - ProcessedInstr::MemoryStoreReg { + Some(ProcessedInstr::MemoryStoreReg { handler_index: HANDLER_IDX_I32_STORE16, addr, value, offset: memarg.offset, - }, + }), None, ) } wasmparser::Operator::I64Store8 { memarg } => { let value = allocator.pop(&ValueType::NumType(NumType::I64)); - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); ( - ProcessedInstr::MemoryStoreReg { + 
Some(ProcessedInstr::MemoryStoreReg { handler_index: HANDLER_IDX_I64_STORE8, addr, value, offset: memarg.offset, - }, + }), None, ) } wasmparser::Operator::I64Store16 { memarg } => { let value = allocator.pop(&ValueType::NumType(NumType::I64)); - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); ( - ProcessedInstr::MemoryStoreReg { + Some(ProcessedInstr::MemoryStoreReg { handler_index: HANDLER_IDX_I64_STORE16, addr, value, offset: memarg.offset, - }, + }), None, ) } wasmparser::Operator::I64Store32 { memarg } => { let value = allocator.pop(&ValueType::NumType(NumType::I64)); - let addr = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr_reg = allocator.pop(&ValueType::NumType(NumType::I32)); + let addr = take_i32_operand(&mut pending_operands, addr_reg.index()); ( - ProcessedInstr::MemoryStoreReg { + Some(ProcessedInstr::MemoryStoreReg { handler_index: HANDLER_IDX_I64_STORE32, addr, value, offset: memarg.offset, - }, + }), None, ) } @@ -4333,12 +6962,12 @@ fn decode_processed_instrs_and_fixups<'a>( wasmparser::Operator::MemorySize { .. 
} => { let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::MemoryOpsReg { + Some(ProcessedInstr::MemoryOpsReg { handler_index: HANDLER_IDX_MEMORY_SIZE, dst: Some(dst), - args: vec![], + args: Box::new([]), data_index: 0, - }, + }), None, ) } @@ -4346,12 +6975,12 @@ fn decode_processed_instrs_and_fixups<'a>( let delta = allocator.pop(&ValueType::NumType(NumType::I32)); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::MemoryOpsReg { + Some(ProcessedInstr::MemoryOpsReg { handler_index: HANDLER_IDX_MEMORY_GROW, dst: Some(dst), - args: vec![delta], + args: Box::new([delta]), data_index: 0, - }, + }), None, ) } @@ -4360,12 +6989,12 @@ fn decode_processed_instrs_and_fixups<'a>( let src = allocator.pop(&ValueType::NumType(NumType::I32)); let dest = allocator.pop(&ValueType::NumType(NumType::I32)); ( - ProcessedInstr::MemoryOpsReg { + Some(ProcessedInstr::MemoryOpsReg { handler_index: HANDLER_IDX_MEMORY_COPY, dst: None, - args: vec![dest, src, len], + args: Box::new([dest, src, len]), data_index: 0, - }, + }), None, ) } @@ -4374,12 +7003,12 @@ fn decode_processed_instrs_and_fixups<'a>( let offset = allocator.pop(&ValueType::NumType(NumType::I32)); let dest = allocator.pop(&ValueType::NumType(NumType::I32)); ( - ProcessedInstr::MemoryOpsReg { + Some(ProcessedInstr::MemoryOpsReg { handler_index: HANDLER_IDX_MEMORY_INIT, dst: None, - args: vec![dest, offset, len], + args: Box::new([dest, offset, len]), data_index: *data_index, - }, + }), None, ) } @@ -4388,19 +7017,19 @@ fn decode_processed_instrs_and_fixups<'a>( let val = allocator.pop(&ValueType::NumType(NumType::I32)); let dest = allocator.pop(&ValueType::NumType(NumType::I32)); ( - ProcessedInstr::MemoryOpsReg { + Some(ProcessedInstr::MemoryOpsReg { handler_index: HANDLER_IDX_MEMORY_FILL, dst: None, - args: vec![dest, val, size], + args: Box::new([dest, val, size]), data_index: 0, - }, + }), None, ) } wasmparser::Operator::DataDrop { data_index } => ( - 
ProcessedInstr::DataDropReg { + Some(ProcessedInstr::DataDropReg { data_index: *data_index, - }, + }), None, ), @@ -4411,7 +7040,7 @@ fn decode_processed_instrs_and_fixups<'a>( let cond = allocator.pop(&ValueType::NumType(NumType::I32)); let val2 = allocator.pop(&val_type); let val1 = allocator.pop(&val_type); - let dst = allocator.push(val_type.clone()); + let dst = allocator.push(val_type); let handler_index = match &val_type { ValueType::NumType(NumType::I32) => HANDLER_IDX_SELECT_I32, @@ -4423,13 +7052,13 @@ fn decode_processed_instrs_and_fixups<'a>( }; ( - ProcessedInstr::SelectReg { + Some(ProcessedInstr::SelectReg { handler_index, dst, val1, val2, cond, - }, + }), None, ) } @@ -4455,13 +7084,13 @@ fn decode_processed_instrs_and_fixups<'a>( let dst = allocator.push(val_type); ( - ProcessedInstr::SelectReg { + Some(ProcessedInstr::SelectReg { handler_index, dst, val1, val2, cond, - }, + }), None, ) } @@ -4471,14 +7100,14 @@ fn decode_processed_instrs_and_fixups<'a>( wasmparser::HeapType::Extern => RefType::ExternalRef, _ => RefType::ExternalRef, }; - let dst = allocator.push(ValueType::RefType(ref_type.clone())); + let dst = allocator.push(ValueType::RefType(ref_type)); ( - ProcessedInstr::TableRefReg { + Some(ProcessedInstr::TableRefReg { handler_index: HANDLER_IDX_REF_NULL_REG, table_idx: 0, regs: [dst.index(), 0, 0], ref_type, - }, + }), None, ) } @@ -4488,12 +7117,12 @@ fn decode_processed_instrs_and_fixups<'a>( let src = allocator.pop(&ValueType::RefType(RefType::FuncRef)); let dst = allocator.push(ValueType::NumType(NumType::I32)); ( - ProcessedInstr::TableRefReg { + Some(ProcessedInstr::TableRefReg { handler_index: HANDLER_IDX_REF_IS_NULL_REG, table_idx: 0, regs: [dst.index(), src.index(), 0], ref_type: RefType::FuncRef, // Not used for RefIsNull - }, + }), None, ) } @@ -4502,18 +7131,18 @@ fn decode_processed_instrs_and_fixups<'a>( // table.get: [i32] -> [ref] let ref_type_vt = get_table_element_type(module, *table); let idx = 
allocator.pop(&ValueType::NumType(NumType::I32)); - let dst = allocator.push(ref_type_vt.clone()); + let dst = allocator.push(ref_type_vt); let ref_type = match ref_type_vt { ValueType::RefType(rt) => rt, _ => RefType::FuncRef, }; ( - ProcessedInstr::TableRefReg { + Some(ProcessedInstr::TableRefReg { handler_index: HANDLER_IDX_TABLE_GET_REG, table_idx: *table, regs: [dst.index(), idx.index(), 0], ref_type, - }, + }), None, ) } @@ -4524,12 +7153,12 @@ fn decode_processed_instrs_and_fixups<'a>( let val = allocator.pop(&ref_type_vt); let idx = allocator.pop(&ValueType::NumType(NumType::I32)); ( - ProcessedInstr::TableRefReg { + Some(ProcessedInstr::TableRefReg { handler_index: HANDLER_IDX_TABLE_SET_REG, table_idx: *table, regs: [idx.index(), val.index(), 0], ref_type: RefType::FuncRef, // Not used for TableSet - }, + }), None, ) } @@ -4541,12 +7170,12 @@ fn decode_processed_instrs_and_fixups<'a>( let val = allocator.pop(&ref_type_vt); let i = allocator.pop(&ValueType::NumType(NumType::I32)); ( - ProcessedInstr::TableRefReg { + Some(ProcessedInstr::TableRefReg { handler_index: HANDLER_IDX_TABLE_FILL_REG, table_idx: *table, regs: [i.index(), val.index(), n.index()], ref_type: RefType::FuncRef, // Not used for TableFill - }, + }), None, ) } @@ -4603,57 +7232,60 @@ fn decode_processed_instrs_and_fixups<'a>( } // All instructions are now register-based - initial_processed_instrs.push(processed_instr_template); - if let Some(fixup_info) = fixup_info_opt { - initial_fixups.push(fixup_info); - } + // Only push if we have an instruction (None means folded away) + if let Some(instr) = processed_instr_template { + initial_processed_instrs.push(instr); + if let Some(fixup_info) = fixup_info_opt { + initial_fixups.push(fixup_info); + } - // Update control_info_stack and block_result_regs_map - match op { - wasmparser::Operator::Block { .. 
} => { - // Register for BrTable resolution (always needed) - if let Some(block_info) = control_info_stack.last() { - block_result_regs_map.insert( - current_processed_pc, - (block_info.result_regs.clone(), false), - ); + // Update control_info_stack and block_result_regs_map + match op { + wasmparser::Operator::Block { .. } => { + // Register for BrTable resolution (always needed) + if let Some(block_info) = control_info_stack.last() { + block_result_regs_map.insert( + current_processed_pc, + (block_info.result_regs.clone(), false), + ); + } } - } - wasmparser::Operator::Loop { .. } => { - // Register for BrTable resolution (always needed) - // For loops, register param_regs (used when branching to loop) - if let Some(block_info) = control_info_stack.last() { - block_result_regs_map - .insert(current_processed_pc, (block_info.param_regs.clone(), true)); + wasmparser::Operator::Loop { .. } => { + // Register for BrTable resolution (always needed) + // For loops, register param_regs (used when branching to loop) + if let Some(block_info) = control_info_stack.last() { + block_result_regs_map + .insert(current_processed_pc, (block_info.param_regs.clone(), true)); + } } - } - wasmparser::Operator::If { .. } => { - // Register for BrTable resolution (always needed) - if let Some(block_info) = control_info_stack.last() { - block_result_regs_map.insert( - current_processed_pc, - (block_info.result_regs.clone(), false), - ); + wasmparser::Operator::If { .. 
} => { + // Register for BrTable resolution (always needed) + if let Some(block_info) = control_info_stack.last() { + block_result_regs_map.insert( + current_processed_pc, + (block_info.result_regs.clone(), false), + ); + } } + wasmparser::Operator::End => { + // Register mode End already popped in its match arm above + } + _ => {} } - wasmparser::Operator::End => { - // Register mode End already popped in its match arm above - } - _ => {} - } - // Mark following code as unreachable after unconditional control flow - match op { - wasmparser::Operator::Br { .. } - | wasmparser::Operator::BrTable { .. } - | wasmparser::Operator::Return - | wasmparser::Operator::Unreachable => { - unreachable_depth = 1; + // Mark following code as unreachable after unconditional control flow + match op { + wasmparser::Operator::Br { .. } + | wasmparser::Operator::BrTable { .. } + | wasmparser::Operator::Return + | wasmparser::Operator::Unreachable => { + unreachable_depth = 1; + } + _ => {} } - _ => {} - } - current_processed_pc += 1; + current_processed_pc += 1; + } } if !control_stack_for_map_building.is_empty() { @@ -4699,7 +7331,7 @@ fn compute_branch_regs( // Get target block from control_info_stack let stack_len = control_info_stack.len(); if stack_len == 0 || relative_depth >= stack_len { - return (vec![], vec![]); + return (Vec::new(), Vec::new()); } let target_idx = stack_len - 1 - relative_depth; @@ -4717,7 +7349,7 @@ fn compute_branch_regs( // Get the registers at stack top matching result types let result_count = target_result_regs.len(); if result_count == 0 { - vec![] + Vec::new() } else { // Get current state and compute source registers let state = allocator.save_state(); @@ -4756,7 +7388,7 @@ fn compute_branch_regs( .collect() } } else { - vec![] + Vec::new() }; (source_regs, target_result_regs) diff --git a/src/structure/types.rs b/src/structure/types.rs index 232cd81..1ca1dfc 100644 --- a/src/structure/types.rs +++ b/src/structure/types.rs @@ -17,7 +17,7 @@ use 
serde::{Deserialize, Serialize}; /// WebAssembly value type. /// /// Represents the types that can appear on the operand stack and in locals/globals. -#[derive(PartialEq, Eq, Hash, Debug, Clone, Serialize, Deserialize)] +#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy, Serialize, Deserialize)] pub enum ValueType { NumType(NumType), VecType(VecType), @@ -25,7 +25,7 @@ pub enum ValueType { } /// Numeric types (i32, i64, f32, f64). -#[derive(PartialEq, Eq, Hash, Debug, Clone, Serialize, Deserialize)] +#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy, Serialize, Deserialize)] pub enum NumType { I32, I64, @@ -34,7 +34,7 @@ pub enum NumType { } /// SIMD vector type (v128). -#[derive(PartialEq, Eq, Hash, Debug, Clone, Serialize, Deserialize)] +#[derive(PartialEq, Eq, Hash, Debug, Clone, Copy, Serialize, Deserialize)] pub enum VecType { V128, } @@ -96,7 +96,7 @@ impl Into for TableIdx { impl GetIdx for TableIdx {} /// Index into the memory section. -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +#[derive(PartialEq, Debug, Clone, Copy, Serialize, Deserialize)] pub struct MemIdx(pub u32); impl Into for MemIdx { fn into(self) -> u32 { @@ -116,7 +116,7 @@ impl Into for FuncIdx { impl GetIdx for FuncIdx {} /// Index into the global section. -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +#[derive(PartialEq, Debug, Clone, Copy, Serialize, Deserialize)] pub struct GlobalIdx(pub u32); impl Into for GlobalIdx { fn into(self) -> u32 { @@ -126,19 +126,19 @@ impl Into for GlobalIdx { impl GetIdx for GlobalIdx {} /// Index into local variables within a function. -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +#[derive(PartialEq, Debug, Clone, Copy, Serialize, Deserialize)] pub struct LocalIdx(pub u32); /// SIMD lane index (0-15 for v128). -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +#[derive(PartialEq, Debug, Clone, Copy, Serialize, Deserialize)] pub struct LaneIdx(pub u8); /// Index into the data section. 
-#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +#[derive(PartialEq, Debug, Clone, Copy, Serialize, Deserialize)] pub struct DataIdx(pub u32); /// Label index for branch instructions. -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +#[derive(PartialEq, Debug, Clone, Copy, Serialize, Deserialize)] pub struct LabelIdx(pub u32); impl Into for LabelIdx { fn into(self) -> u32 { @@ -148,7 +148,7 @@ impl Into for LabelIdx { impl GetIdx for LabelIdx {} /// Index into the element section. -#[derive(PartialEq, Debug, Clone, Serialize, Deserialize)] +#[derive(PartialEq, Debug, Clone, Copy, Serialize, Deserialize)] pub struct ElemIdx(pub u32); /// Block type for structured control flow. @@ -167,11 +167,11 @@ pub struct Byte(pub u8); pub struct Name(pub String); /// Table type specifying limits and element reference type. -#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] +#[derive(PartialEq, Clone, Copy, Debug, Serialize, Deserialize)] pub struct TableType(pub Limits, pub RefType); /// Size limits for tables and memories. -#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] +#[derive(PartialEq, Clone, Copy, Debug, Serialize, Deserialize)] pub struct Limits { /// Minimum size (in pages for memory, elements for tables). pub min: u32, @@ -180,15 +180,15 @@ pub struct Limits { } /// Memory type specifying size limits. -#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] +#[derive(PartialEq, Clone, Copy, Debug, Serialize, Deserialize)] pub struct MemType(pub Limits); /// Global type specifying mutability and value type. -#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] +#[derive(PartialEq, Clone, Copy, Debug, Serialize, Deserialize)] pub struct GlobalType(pub Mut, pub ValueType); /// Mutability indicator for globals. -#[derive(PartialEq, Clone, Debug, Serialize, Deserialize)] +#[derive(PartialEq, Clone, Copy, Debug, Serialize, Deserialize)] pub enum Mut { /// Immutable global. 
Const, diff --git a/src/wasi/passthrough.rs b/src/wasi/passthrough.rs index cc46ea7..e0f298f 100644 --- a/src/wasi/passthrough.rs +++ b/src/wasi/passthrough.rs @@ -241,16 +241,12 @@ impl PassthroughWasiImpl { ) }; - drop(memory_guard); - if wasi_errno == 0 { let nwritten_memarg = Memarg { offset: 0, align: 4, }; - memory - .store(&nwritten_memarg, nwritten_ptr as i32, nwritten) - .map_err(|_| WasiError::Fault)?; + memory.store(&nwritten_memarg, nwritten_ptr as i32, nwritten); } Ok(wasi_errno as i32) @@ -303,16 +299,12 @@ impl PassthroughWasiImpl { let wasi_errno = unsafe { __wasi_fd_read(fd as u32, iovecs.as_ptr(), iovs_len, &mut nread as *mut u32) }; - drop(memory_guard); - if wasi_errno == 0 { let nread_memarg = Memarg { offset: 0, align: 4, }; - memory - .store(&nread_memarg, nread_ptr as i32, nread) - .map_err(|_| WasiError::Fault)?; + memory.store(&nread_memarg, nread_ptr as i32, nread); } Ok(wasi_errno as i32) @@ -336,8 +328,6 @@ impl PassthroughWasiImpl { let wasi_errno = unsafe { __wasi_random_get(memory_base.add(buf_ptr as usize) as *mut u8, buf_len) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -390,14 +380,10 @@ impl PassthroughWasiImpl { ptr_data.extend_from_slice(&0u32.to_le_bytes()); // Write pointer array to WebAssembly memory - memory - .store_bytes(environ_ptr as i32, &ptr_data) - .map_err(|_| WasiError::Fault)?; + memory.store_bytes(environ_ptr as i32, &ptr_data); // Write environment strings to WebAssembly memory - memory - .store_bytes(environ_buf_ptr as i32, &environ_buf) - .map_err(|_| WasiError::Fault)?; + memory.store_bytes(environ_buf_ptr as i32, &environ_buf); Ok(0) } @@ -423,18 +409,14 @@ impl PassthroughWasiImpl { offset: 0, align: 4, }; - memory - .store(&count_memarg, environ_count_ptr as i32, environ_count) - .map_err(|_| WasiError::Fault)?; + memory.store(&count_memarg, environ_count_ptr as i32, environ_count); // Write total buffer size needed let size_memarg = Memarg { offset: 0, align: 4, }; - memory - .store(&size_memarg, 
environ_buf_size_ptr as i32, environ_buf_size) - .map_err(|_| WasiError::Fault)?; + memory.store(&size_memarg, environ_buf_size_ptr as i32, environ_buf_size); Ok(0) } @@ -463,14 +445,10 @@ impl PassthroughWasiImpl { ptr_data.extend_from_slice(&0u32.to_le_bytes()); // Write pointer array to WebAssembly memory - memory - .store_bytes(argv_ptr as i32, &ptr_data) - .map_err(|_| WasiError::Fault)?; + memory.store_bytes(argv_ptr as i32, &ptr_data); // Write argument strings to WebAssembly memory - memory - .store_bytes(argv_buf_ptr as i32, &argv_buf) - .map_err(|_| WasiError::Fault)?; + memory.store_bytes(argv_buf_ptr as i32, &argv_buf); Ok(0) } @@ -494,22 +472,18 @@ impl PassthroughWasiImpl { offset: 0, align: 4, }; - memory - .store(&argc_memarg, argc_ptr as i32, argc) - .map_err(|_| WasiError::Fault)?; + memory.store(&argc_memarg, argc_ptr as i32, argc); // Write total buffer size needed to WebAssembly memory let argv_buf_size_memarg = Memarg { offset: 0, align: 4, }; - memory - .store( - &argv_buf_size_memarg, - argv_buf_size_ptr as i32, - argv_buf_size, - ) - .map_err(|_| WasiError::Fault)?; + memory.store( + &argv_buf_size_memarg, + argv_buf_size_ptr as i32, + argv_buf_size, + ); Ok(0) } @@ -531,9 +505,7 @@ impl PassthroughWasiImpl { } // Write timestamp (64-bit nanoseconds) to memory using store_bytes - memory - .store_bytes(time_ptr as i32, &time.to_le_bytes()) - .map_err(|_| WasiError::Fault)?; + memory.store_bytes(time_ptr as i32, &time.to_le_bytes()); Ok(wasi_errno as i32) } @@ -553,9 +525,7 @@ impl PassthroughWasiImpl { } // Write resolution (64-bit nanoseconds) to memory using store_bytes - memory - .store_bytes(resolution_ptr as i32, &resolution.to_le_bytes()) - .map_err(|_| WasiError::Fault)?; + memory.store_bytes(resolution_ptr as i32, &resolution.to_le_bytes()); Ok(wasi_errno as i32) } @@ -568,8 +538,6 @@ impl PassthroughWasiImpl { __wasi_fd_prestat_get(fd as u32, memory_base.add(prestat_ptr as usize) as *mut u8) }; - drop(memory_guard); - Ok(wasi_errno 
as i32) } @@ -591,8 +559,6 @@ impl PassthroughWasiImpl { ) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -610,8 +576,6 @@ impl PassthroughWasiImpl { __wasi_fd_fdstat_get(fd as u32, memory_base.add(stat_ptr as usize) as *mut u8) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -651,8 +615,6 @@ impl PassthroughWasiImpl { ) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -676,8 +638,6 @@ impl PassthroughWasiImpl { ) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -688,8 +648,6 @@ impl PassthroughWasiImpl { let wasi_errno = unsafe { __wasi_fd_tell(fd as u32, memory_base.add(offset_ptr as usize) as *mut u64) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -707,8 +665,6 @@ impl PassthroughWasiImpl { __wasi_fd_filestat_get(fd as u32, memory_base.add(filestat_ptr as usize) as *mut u8) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -734,8 +690,6 @@ impl PassthroughWasiImpl { ) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -795,8 +749,6 @@ impl PassthroughWasiImpl { ) }; - drop(memory_guard); - if wasi_errno != 0 { return Ok(wasi_errno as i32); } @@ -805,9 +757,7 @@ impl PassthroughWasiImpl { offset: 0, align: 4, }; - memory - .store(&nread_memarg, nread_ptr as i32, nread) - .map_err(|_| WasiError::Fault)?; + memory.store(&nread_memarg, nread_ptr as i32, nread); Ok(0) } @@ -886,8 +836,6 @@ impl PassthroughWasiImpl { ) }; - drop(memory_guard); - if wasi_errno != 0 { return Ok(wasi_errno as i32); } @@ -896,9 +844,7 @@ impl PassthroughWasiImpl { offset: 0, align: 4, }; - memory - .store(&nwritten_memarg, nwritten_ptr as i32, nwritten) - .map_err(|_| WasiError::Fault)?; + memory.store(&nwritten_memarg, nwritten_ptr as i32, nwritten); Ok(0) } @@ -922,8 +868,6 @@ impl PassthroughWasiImpl { let wasi_errno = unsafe { __wasi_path_create_directory(fd as u32, path_vec.as_ptr()) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -955,8 +899,6 @@ impl PassthroughWasiImpl { ) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -992,8 
+934,6 @@ impl PassthroughWasiImpl { ) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -1027,8 +967,6 @@ impl PassthroughWasiImpl { ) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -1051,8 +989,6 @@ impl PassthroughWasiImpl { let wasi_errno = unsafe { __wasi_path_remove_directory(fd as u32, path_vec.as_ptr()) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -1074,8 +1010,6 @@ impl PassthroughWasiImpl { let wasi_errno = unsafe { __wasi_path_unlink_file(fd as u32, path_vec.as_ptr()) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -1099,8 +1033,6 @@ impl PassthroughWasiImpl { ) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -1210,8 +1142,6 @@ impl PassthroughWasiImpl { ) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -1256,8 +1186,6 @@ impl PassthroughWasiImpl { ) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -1295,8 +1223,6 @@ impl PassthroughWasiImpl { let wasi_errno = unsafe { __wasi_path_symlink(old_path_vec.as_ptr(), fd, new_path_vec.as_ptr()) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -1313,8 +1239,6 @@ impl PassthroughWasiImpl { let wasi_errno = unsafe { __wasi_sock_accept(fd, flags, memory_base.add(fd_ptr as usize) as *mut u32) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -1342,8 +1266,6 @@ impl PassthroughWasiImpl { ) }; - drop(memory_guard); - Ok(wasi_errno as i32) } @@ -1369,8 +1291,6 @@ impl PassthroughWasiImpl { ) }; - drop(memory_guard); - Ok(wasi_errno as i32) }