diff --git a/src/instance/vm.zig b/src/instance/vm.zig index e351550e..08e15123 100644 --- a/src/instance/vm.zig +++ b/src/instance/vm.zig @@ -84,15 +84,18 @@ pub const VirtualMachine = struct { } pub fn invoke(self: *VirtualMachine, ip: usize) !void { - const instr = self.inst.module.parsed_code.items[ip]; + const instr = self.inst.module.instructions.items[ip]; + const imm = self.inst.module.immediates_offset.items[ip]; - try @call(.auto, lookup[@intFromEnum(instr)], .{ self, ip, self.inst.module.parsed_code.items }); + try @call(.auto, instr, .{ self, ip, imm, @as([]Instruction, @ptrCast(self.inst.module.instructions.items)), self.inst.module.immediates.items }); } - const InstructionFunction = *const fn (*VirtualMachine, usize, []Rr) WasmError!void; + // To avoid a recursive definition, define similar function pointer type we will cast to / from + pub const Instruction = *const fn (*VirtualMachine, usize, usize, []*void, []u32) WasmError!void; + pub const InstructionFunction = *const fn (*VirtualMachine, usize, usize, []Instruction, []u32) WasmError!void; - const lookup = [256]InstructionFunction{ - @"unreachable", nop, block, loop, @"if", @"else", if_no_else, impl_ni, impl_ni, impl_ni, impl_ni, end, br, br_if, br_table, @"return", + pub const lookup = [256]InstructionFunction{ + @"unreachable", nop, block, loop, @"if", @"else", if_with_else, impl_ni, impl_ni, impl_ni, impl_ni, end, br, br_if, br_table, @"return", call, call_indirect, fast_call, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, drop, select, select, impl_ni, impl_ni, impl_ni, @"local.get", @"local.set", @"local.tee", @"global.get", @"global.set", @"table.get", @"table.set", impl_ni, @"i32.load", @"i64.load", @"f32.load", @"f64.load", @"i32.load8_s", @"i32.load8_u", @"i32.load16_s", @"i32.load16_u", @"i64.load8_s", @"i64.load8_u", @"i64.load16_s", @"i64.load16_u", @"i64.load32_s", @"i64.load32_u", @"i32.store", @"i64.store", @"f32.store", @"f64.store", @"i32.store8", @"i32.store16", 
@"i64.store8", @"i64.store16", @"i64.store32", @"memory.size", @@ -110,120 +113,142 @@ pub const VirtualMachine = struct { impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, impl_ni, misc, impl_ni, impl_ni, impl_ni, }; - inline fn dispatch(self: *VirtualMachine, next_ip: usize, code: []Rr) WasmError!void { - const next_instr = code[next_ip]; + inline fn dispatch(self: *VirtualMachine, next_ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const next_fn = instructions[next_ip]; - return try @call(.always_tail, lookup[@intFromEnum(next_instr)], .{ self, next_ip, code }); + return try @call(.always_tail, @as(InstructionFunction, @ptrCast(next_fn)), .{ self, next_ip, imm, instructions, immediates }); } pub const REF_NULL: u64 = 0xFFFF_FFFF_FFFF_FFFF; - fn impl_ni(_: *VirtualMachine, _: usize, _: []Rr) WasmError!void { + pub fn impl_ni(_: *VirtualMachine, _: usize, _: usize, _: []Instruction, _: []u32) WasmError!void { return error.NotImplemented; } - fn @"unreachable"(_: *VirtualMachine, _: usize, _: []Rr) WasmError!void { + pub fn @"unreachable"(_: *VirtualMachine, _: usize, _: usize, _: []Instruction, _: []u32) WasmError!void { return error.TrapUnreachable; } - fn nop(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - return dispatch(self, ip + 1, code); + pub fn nop(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn block(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].block; + pub fn block(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + // const meta = code[ip].block; + const param_arity = immediates[imm]; + const return_arity = immediates[imm + 1]; + const branch_target = immediates[imm + 2]; try self.pushLabel(Label{ - .return_arity 
= meta.return_arity, - .op_stack_len = self.op_ptr - meta.param_arity, - .branch_target = meta.branch_target, + .return_arity = return_arity, + .op_stack_len = self.op_ptr - param_arity, + .branch_target = branch_target, }); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 3, instructions, immediates); } - fn loop(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].loop; + pub fn loop(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const param_arity = immediates[imm]; + const return_arity = immediates[imm + 1]; + _ = return_arity; + const branch_target = immediates[imm + 2]; try self.pushLabel(Label{ // note that we use block_params rather than block_returns for return arity: - .return_arity = meta.param_arity, - .op_stack_len = self.op_ptr - meta.param_arity, - .branch_target = meta.branch_target, + .return_arity = param_arity, + .op_stack_len = self.op_ptr - param_arity, + .branch_target = branch_target, }); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 3, instructions, immediates); } - fn @"if"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"if"; + pub fn if_with_else(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const param_arity = immediates[imm]; + const return_arity = immediates[imm + 1]; + const branch_target = immediates[imm + 2]; + const else_ip = immediates[imm + 3]; + const condition = self.popOperand(u32); try self.pushLabel(Label{ - .return_arity = meta.return_arity, - .op_stack_len = self.op_ptr - meta.param_arity, - .branch_target = meta.branch_target, + .return_arity = return_arity, + .op_stack_len = self.op_ptr - param_arity, + .branch_target = branch_target, }); - return dispatch(self, if (condition == 0) meta.else_ip else ip + 1, code); + return dispatch(self, if (condition == 0) 
else_ip else ip + 1, imm + 4, instructions, immediates); } - fn @"else"(self: *VirtualMachine, _: usize, code: []Rr) WasmError!void { + pub fn @"else"(self: *VirtualMachine, _: usize, _: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const label = self.popLabel(); - return dispatch(self, label.branch_target, code); + const next_imm = self.inst.module.immediates_offset.items[label.branch_target]; + return dispatch(self, label.branch_target, next_imm, instructions, immediates); } - fn if_no_else(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].if_no_else; + // if_no_else + pub fn @"if"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + // const meta = code[ip].if_no_else; + const param_arity = immediates[imm]; + const return_arity = immediates[imm + 1]; + const branch_target = immediates[imm + 2]; + _ = immediates[imm + 3]; + const condition = self.popOperand(u32); if (condition == 0) { - return dispatch(self, meta.branch_target, code); + const next_imm = self.inst.module.immediates_offset.items[branch_target]; + return dispatch(self, branch_target, next_imm, instructions, immediates); } else { // We are inside the if branch try self.pushLabel(Label{ - .return_arity = meta.return_arity, - .op_stack_len = self.op_ptr - meta.param_arity, - .branch_target = meta.branch_target, + .return_arity = return_arity, + .op_stack_len = self.op_ptr - param_arity, + .branch_target = branch_target, }); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 4, instructions, immediates); } } - fn end(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn end(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { _ = self.popLabel(); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn br(self: *VirtualMachine, 
ip: usize, code: []Rr) WasmError!void { - const next_ip = self.branch(code[ip].br); + pub fn br(self: *VirtualMachine, _: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const next_ip = self.branch(immediates[imm]); - return dispatch(self, next_ip, code); + return dispatch(self, next_ip, imm + 1, instructions, immediates); } - fn br_if(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn br_if(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const condition = self.popOperand(u32); - const next_ip = if (condition == 0) ip + 1 else self.branch(code[ip].br_if); + const next_ip = if (condition == 0) ip + 1 else self.branch(immediates[imm]); + const next_offset = self.inst.module.immediates_offset.items[next_ip]; - return dispatch(self, next_ip, code); + return dispatch(self, next_ip, next_offset, instructions, immediates); } - fn br_table(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].br_table; + pub fn br_table(self: *VirtualMachine, _: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const ls_ptr = immediates[imm]; + const ls_len = immediates[imm + 1]; + const ln = immediates[imm + 2]; const i = self.popOperand(u32); - const ls = self.inst.module.br_table_indices.items[meta.ls.offset .. meta.ls.offset + meta.ls.count]; + const ls = self.inst.module.br_table_indices.items[ls_ptr .. 
ls_ptr + ls_len]; - const next_ip = if (i >= ls.len) self.branch(meta.ln) else self.branch(ls[i]); + const next_ip = if (i >= ls.len) self.branch(ln) else self.branch(ls[i]); + const next_imm = self.inst.module.immediates_offset.items[next_ip]; - return dispatch(self, next_ip, code); + return dispatch(self, next_ip, next_imm, instructions, immediates); } - fn @"return"(self: *VirtualMachine, _: usize, _: []Rr) WasmError!void { + pub fn @"return"(self: *VirtualMachine, _: usize, _: usize, _: []Instruction, immediates: []u32) WasmError!void { const frame = self.peekFrame(); const n = frame.return_arity; @@ -246,14 +271,18 @@ pub const VirtualMachine = struct { const previous_frame = self.peekFrame(); self.inst = previous_frame.inst; - return dispatch(self, label.branch_target, previous_frame.inst.module.parsed_code.items); + // FIXME: probably reference previous frame + const branch_target_immediate_offset = self.inst.module.immediates_offset.items[label.branch_target]; + return dispatch(self, label.branch_target, branch_target_immediate_offset, @as([]Instruction, @ptrCast(previous_frame.inst.module.instructions.items)), immediates); } - fn call(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const funcidx = code[ip].call; + pub fn call(self: *VirtualMachine, ip: usize, imm: usize, _: []Instruction, immediates: []u32) WasmError!void { + // const funcidx = code[ip].call; + const funcidx = immediates[imm]; const function = try self.inst.getFunc(funcidx); var next_ip = ip; + var next_imm = imm; switch (function.subtype) { .function => |f| { @@ -263,6 +292,17 @@ pub const VirtualMachine = struct { // Make space for locals (again, params already on stack) self.op_ptr += f.locals_count; + if (self.inst == f.instance) { + // Switch call with fast_call + self.inst.module.instructions.items[ip] = VirtualMachine.fast_call; + immediates[imm + 0] = @as(u32, @truncate(f.start)); + immediates[imm + 1] = @as(u32, @truncate(f.locals_count)); + immediates[imm + 2] = 
@as(u32, @truncate(function.params.len)); + immediates[imm + 3] = @as(u32, @truncate(function.results.len)); + immediates[imm + 4] = @as(u32, @truncate(f.required_stack_space)); + immediates[imm + 5] = self.inst.module.immediates_offset.items[f.start]; + } + self.inst = f.instance; // Consume parameters from the stack @@ -281,22 +321,25 @@ pub const VirtualMachine = struct { }); next_ip = f.start; + next_imm = self.inst.module.immediates_offset.items[f.start]; }, .host_function => |hf| { try hf.func(self); next_ip = ip + 1; + next_imm = imm + 6; }, } - return dispatch(self, next_ip, self.inst.module.parsed_code.items); + // FIXME: + return dispatch(self, next_ip, next_imm, @as([]Instruction, @ptrCast(self.inst.module.instructions.items)), immediates); } - fn call_indirect(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const call_indirect_instruction = code[ip].call_indirect; + pub fn call_indirect(self: *VirtualMachine, ip: usize, imm: usize, _: []Instruction, immediates: []u32) WasmError!void { + // const call_indirect_instruction = code[ip].call_indirect; var module = self.inst.module; - const typeidx = call_indirect_instruction.typeidx; - const tableidx = call_indirect_instruction.tableidx; + const typeidx = immediates[imm]; + const tableidx = immediates[imm + 1]; // Read lookup index from stack const lookup_index = self.popOperand(u32); @@ -309,6 +352,7 @@ pub const VirtualMachine = struct { try function.checkSignatures(call_indirect_func_type); var next_ip = ip; + var next_imm = imm; switch (function.subtype) { .function => |func| { @@ -341,45 +385,52 @@ pub const VirtualMachine = struct { try host_func.func(self); next_ip = ip + 1; + next_imm = imm + 1; }, } - return dispatch(self, next_ip, self.inst.module.parsed_code.items); + return dispatch(self, next_ip, next_imm, @as([]Instruction, @ptrCast(self.inst.module.instructions.items)), immediates); } - fn fast_call(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const f = 
code[ip].fast_call; + pub fn fast_call(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + // const f = code[ip].fast_call; + const start = immediates[imm]; + const locals = immediates[imm + 1]; + const params = immediates[imm + 2]; + const results = immediates[imm + 3]; + const required_stack_space = immediates[imm + 4]; + const next_imm = immediates[imm + 5]; // Check we have enough stack space - try self.checkStackSpace(f.required_stack_space + f.locals); + try self.checkStackSpace(required_stack_space + locals); // Make space for locals (again, params already on stack) - self.op_ptr += f.locals; + self.op_ptr += locals; // Consume parameters from the stack try self.pushFrame(Frame{ - .op_stack_len = self.op_ptr - f.params - f.locals, + .op_stack_len = self.op_ptr - params - locals, .label_stack_len = self.label_ptr, - .return_arity = f.results, + .return_arity = results, .inst = self.inst, - }, f.locals + f.params); + }, locals + params); // Our continuation is the code after call try self.pushLabel(Label{ - .return_arity = f.results, - .op_stack_len = self.op_ptr - f.params - f.locals, + .return_arity = results, + .op_stack_len = self.op_ptr - params - locals, .branch_target = ip + 1, }); - return dispatch(self, f.start, code); + return dispatch(self, start, next_imm, instructions, immediates); } - fn drop(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn drop(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { _ = self.popAnyOperand(); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn select(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn select(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const condition = self.popOperand(u32); const c2 = self.popOperand(u64); const c1 = 
self.popOperand(u64); @@ -390,60 +441,60 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, c2); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"local.get"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const localidx = code[ip].@"local.get"; + pub fn @"local.get"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const localidx = immediates[imm]; const frame = self.peekFrame(); self.pushOperandNoCheck(u64, frame.locals[localidx]); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } - fn @"local.set"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const localidx = code[ip].@"local.set"; + pub fn @"local.set"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const localidx = immediates[imm]; const frame = self.peekFrame(); frame.locals[localidx] = self.popOperand(u64); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } - fn @"local.tee"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const localidx = code[ip].@"local.tee"; + pub fn @"local.tee"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const localidx = immediates[imm]; const frame = self.peekFrame(); frame.locals[localidx] = self.peekOperand(); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } - fn @"global.get"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const globalidx = code[ip].@"global.get"; + pub fn @"global.get"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const globalidx = immediates[imm]; const global = try 
self.inst.getGlobal(globalidx); self.pushOperandNoCheck(u64, global.value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } - fn @"global.set"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const globalidx = code[ip].@"global.set"; + pub fn @"global.set"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const globalidx = immediates[imm]; const value = self.popAnyOperand(); const global = try self.inst.getGlobal(globalidx); global.value = value; - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } - fn @"table.get"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const tableidx = code[ip].@"table.get"; + pub fn @"table.get"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const tableidx = immediates[imm]; const table = try self.inst.getTable(tableidx); const index = self.popOperand(u32); @@ -455,11 +506,11 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, REF_NULL); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } - fn @"table.set"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const tableidx = code[ip].@"table.set"; + pub fn @"table.set"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const tableidx = immediates[imm]; const table = try self.inst.getTable(tableidx); const ref = self.popOperand(u64); @@ -467,294 +518,340 @@ pub const VirtualMachine = struct { try table.set(index, ref); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } - fn @"i32.load"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i32.load"; + pub fn @"i32.load"(self: 
*VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); - const value = try memory.read(u32, meta.offset, address); + const value = try memory.read(u32, offset, address); self.pushOperandNoCheck(u32, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i64.load"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.load"; + pub fn @"i64.load"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); - const value = try memory.read(u64, meta.offset, address); + const value = try memory.read(u64, offset, address); self.pushOperandNoCheck(u64, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"f32.load"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"f32.load"; + pub fn @"f32.load"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); - const value = try memory.read(f32, meta.offset, address); + const value = try memory.read(f32, offset, address); self.pushOperandNoCheck(f32, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"f64.load"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = 
code[ip].@"f64.load"; + pub fn @"f64.load"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); - const value = try memory.read(f64, meta.offset, address); + const value = try memory.read(f64, offset, address); self.pushOperandNoCheck(f64, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i32.load8_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i32.load8_s"; + pub fn @"i32.load8_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); - const value = try memory.read(i8, meta.offset, address); + const value = try memory.read(i8, offset, address); self.pushOperandNoCheck(i32, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i32.load8_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i32.load8_u"; + pub fn @"i32.load8_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); - const value = try memory.read(u8, meta.offset, address); + const value = try memory.read(u8, offset, address); self.pushOperandNoCheck(u32, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i32.load16_s"(self: 
*VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i32.load16_s"; + pub fn @"i32.load16_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); - const value = try memory.read(i16, meta.offset, address); + const value = try memory.read(i16, offset, address); self.pushOperandNoCheck(i32, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i32.load16_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i32.load16_u"; + pub fn @"i32.load16_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); - const value = try memory.read(u16, meta.offset, address); + const value = try memory.read(u16, offset, address); self.pushOperandNoCheck(u32, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i64.load8_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.load8_s"; + pub fn @"i64.load8_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); - const value = try memory.read(i8, meta.offset, address); + const value = try memory.read(i8, offset, address); self.pushOperandNoCheck(i64, value); - return dispatch(self, ip + 1, code); + return 
dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i64.load8_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.load8_u"; + pub fn @"i64.load8_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); - const value = try memory.read(u8, meta.offset, address); + const value = try memory.read(u8, offset, address); self.pushOperandNoCheck(u64, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i64.load16_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.load16_s"; + pub fn @"i64.load16_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); - const value = try memory.read(i16, meta.offset, address); + const value = try memory.read(i16, offset, address); self.pushOperandNoCheck(i64, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i64.load16_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.load16_u"; + pub fn @"i64.load16_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); - const value = try memory.read(u16, meta.offset, address); + const value = try memory.read(u16, offset, address); 
self.pushOperandNoCheck(u64, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i64.load32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.load32_s"; + pub fn @"i64.load32_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); - const value = try memory.read(i32, meta.offset, address); + const value = try memory.read(i32, offset, address); self.pushOperandNoCheck(i64, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i64.load32_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.load32_u"; + pub fn @"i64.load32_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const address = self.popOperand(u32); - const value = try memory.read(u32, meta.offset, address); + const value = try memory.read(u32, offset, address); self.pushOperandNoCheck(u64, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i32.store"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i32.store"; + pub fn @"i32.store"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const value = self.popOperand(u32); const address = self.popOperand(u32); - 
try memory.write(u32, meta.offset, address, value); + try memory.write(u32, offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i64.store"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.store"; + pub fn @"i64.store"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const value = self.popOperand(u64); const address = self.popOperand(u32); - try memory.write(u64, meta.offset, address, value); + try memory.write(u64, offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"f32.store"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"f32.store"; + pub fn @"f32.store"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const value = self.popOperand(f32); const address = self.popOperand(u32); - try memory.write(f32, meta.offset, address, value); + try memory.write(f32, offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"f64.store"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"f64.store"; + pub fn @"f64.store"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const value = self.popOperand(f64); const address = 
self.popOperand(u32); - try memory.write(f64, meta.offset, address, value); + try memory.write(f64, offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i32.store8"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i32.store8"; + pub fn @"i32.store8"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const value: u8 = @truncate(self.popOperand(u32)); const address = self.popOperand(u32); - try memory.write(u8, meta.offset, address, value); + try memory.write(u8, offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i32.store16"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i32.store16"; + pub fn @"i32.store16"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const value: u16 = @truncate(self.popOperand(u32)); const address = self.popOperand(u32); - try memory.write(u16, meta.offset, address, value); + try memory.write(u16, offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i64.store8"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.store8"; + pub fn @"i64.store8"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try 
self.inst.getMemory(0); const value: u8 = @truncate(self.popOperand(u64)); const address = self.popOperand(u32); - try memory.write(u8, meta.offset, address, value); + try memory.write(u8, offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i64.store16"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.store16"; + pub fn @"i64.store16"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const value: u16 = @truncate(self.popOperand(u64)); const address = self.popOperand(u32); - try memory.write(u16, meta.offset, address, value); + try memory.write(u16, offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i64.store32"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].@"i64.store32"; + pub fn @"i64.store32"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const alignment = immediates[imm]; + _ = alignment; + const offset = immediates[imm + 1]; const memory = try self.inst.getMemory(0); const value: u32 = @truncate(self.popOperand(u64)); const address = self.popOperand(u32); - try memory.write(u32, meta.offset, address, value); + try memory.write(u32, offset, address, value); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"memory.size"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"memory.size"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const memory = try self.inst.getMemory(0); 
self.pushOperandNoCheck(u32, @as(u32, @intCast(memory.size()))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"memory.grow"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"memory.grow"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const memory = try self.inst.getMemory(0); const num_pages = self.popOperand(u32); @@ -764,394 +861,398 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i32, @as(i32, -1)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.const"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const instr = code[ip]; + pub fn @"i32.const"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const literal = @as(i32, @bitCast(immediates[imm])); - self.pushOperandNoCheck(i32, instr.@"i32.const"); + self.pushOperandNoCheck(i32, literal); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } - fn @"i64.const"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const instr = code[ip]; + pub fn @"i64.const"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const lower = immediates[imm]; + const upper = immediates[imm + 1]; + const literal = @as(i64, @bitCast((@as(u64, upper) << 32) + lower)); - self.pushOperandNoCheck(i64, instr.@"i64.const"); + self.pushOperandNoCheck(i64, literal); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"f32.const"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const instr = code[ip]; + pub fn @"f32.const"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void 
{ + const literal = @as(f32, @bitCast(immediates[imm])); - self.pushOperandNoCheck(f32, instr.@"f32.const"); + self.pushOperandNoCheck(f32, literal); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } - fn @"f64.const"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const instr = code[ip]; + pub fn @"f64.const"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const lower = immediates[imm]; + const upper = immediates[imm + 1]; + const literal = @as(f64, @bitCast((@as(u64, upper) << 32) + lower)); - self.pushOperandNoCheck(f64, instr.@"f64.const"); + self.pushOperandNoCheck(f64, literal); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"i32.eqz"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.eqz"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 == 0) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.eq"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.eq"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 == c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.ne"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.ne"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); 
self.pushOperandNoCheck(u32, @as(u32, if (c1 != c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.lt_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.lt_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); self.pushOperandNoCheck(u32, @as(u32, if (c1 < c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.lt_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.lt_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 < c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.gt_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.gt_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); self.pushOperandNoCheck(u32, @as(u32, if (c1 > c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.gt_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.gt_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 > c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.le_s"(self: *VirtualMachine, ip: 
usize, code: []Rr) WasmError!void { + pub fn @"i32.le_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); self.pushOperandNoCheck(u32, @as(u32, if (c1 <= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.le_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.le_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 <= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.ge_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.ge_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); self.pushOperandNoCheck(u32, @as(u32, if (c1 >= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.ge_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.ge_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @as(u32, if (c1 >= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.eqz"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.eqz"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(u64); 
self.pushOperandNoCheck(u64, @as(u64, if (c1 == 0) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.eq"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.eq"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 == c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.ne"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.ne"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 != c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.lt_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.lt_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); self.pushOperandNoCheck(u64, @as(u64, if (c1 < c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.lt_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.lt_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 < c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.gt_s"(self: *VirtualMachine, ip: usize, 
code: []Rr) WasmError!void { + pub fn @"i64.gt_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); self.pushOperandNoCheck(u64, @as(u64, if (c1 > c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.gt_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.gt_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 > c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.le_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.le_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); self.pushOperandNoCheck(u64, @as(u64, if (c1 <= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.le_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.le_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 <= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.ge_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.ge_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(i64); const c1 = 
self.popOperand(i64); self.pushOperandNoCheck(u64, @as(u64, if (c1 >= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.ge_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.ge_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u64, if (c1 >= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.eq"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.eq"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 == c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.ne"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.ne"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 != c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.lt"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.lt"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 < c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.gt"(self: 
*VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.gt"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 > c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.le"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.le"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 <= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.ge"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.ge"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(u64, @as(u64, if (c1 >= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.eq"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.eq"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 == c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.ne"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.ne"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f64); const c1 
= self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 != c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.lt"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.lt"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 < c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.gt"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.gt"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 > c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.le"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.le"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 <= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.ge"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.ge"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(u64, @as(u64, if (c1 >= c2) 1 else 0)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.clz"(self: 
*VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.clz"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @clz(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.ctz"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.ctz"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @ctz(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.popcnt"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.popcnt"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, @popCount(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.add"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.add"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 +% c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.sub"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.sub"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 -% c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn 
@"i32.mul"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.mul"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 *% c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.div_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.div_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); @@ -1159,10 +1260,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i32, div); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.div_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.div_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); @@ -1170,10 +1271,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u32, div); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.rem_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.rem_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); @@ -1182,10 +1283,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i32, rem); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.rem_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.rem_u"(self: *VirtualMachine, ip: usize, imm: 
usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); @@ -1193,46 +1294,46 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u32, rem); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.and"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.and"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 & c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.or"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.or"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 | c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.xor"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.xor"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, c1 ^ c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.shl"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.shl"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, math.shl(u32, c1, c2 % 32)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 
1, imm, instructions, immediates); } - fn @"i32.shr_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.shr_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(i32); const c1 = self.popOperand(i32); @@ -1240,85 +1341,85 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i32, math.shr(i32, c1, mod)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.shr_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.shr_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, math.shr(u32, c1, c2 % 32)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.rotl"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.rotl"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, math.rotl(u32, c1, c2 % 32)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.rotr"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.rotr"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u32); const c1 = self.popOperand(u32); self.pushOperandNoCheck(u32, math.rotr(u32, c1, c2 % 32)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.clz"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.clz"(self: *VirtualMachine, ip: usize, 
imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @clz(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.ctz"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.ctz"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @ctz(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.popcnt"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.popcnt"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @popCount(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.add"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.add"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 +% c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.sub"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.sub"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 -% c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.mul"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.mul"(self: 
*VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 *% c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.div_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.div_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); @@ -1326,10 +1427,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i64, div); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.div_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.div_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); @@ -1337,10 +1438,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, div); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.rem_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.rem_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); @@ -1349,10 +1450,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i64, rem); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.rem_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.rem_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = 
self.popOperand(u64); const c1 = self.popOperand(u64); @@ -1360,46 +1461,46 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, rem); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.and"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.and"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 & c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.or"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.or"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 | c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.xor"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.xor"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, c1 ^ c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.shl"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.shl"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, math.shl(u64, c1, c2 % 64)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.shr_s"(self: *VirtualMachine, ip: 
usize, code: []Rr) WasmError!void { + pub fn @"i64.shr_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(i64); const c1 = self.popOperand(i64); @@ -1407,77 +1508,77 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i64, math.shr(i64, c1, mod)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.shr_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.shr_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, math.shr(u64, c1, c2 % 64)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.rotl"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.rotl"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, math.rotl(u64, c1, c2 % 64)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.rotr"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.rotr"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(u64); const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, math.rotr(u64, c1, c2 % 64)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.abs"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.abs"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 
= self.popOperand(f32); self.pushOperandNoCheck(f32, math.fabs(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.neg"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.neg"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, -c1); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.ceil"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.ceil"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, @ceil(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.floor"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.floor"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, @floor(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.trunc"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.trunc"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, @trunc(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.nearest"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.nearest"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f32); const floor 
= @floor(c1); const ceil = @ceil(c1); @@ -1492,64 +1593,64 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f32, @round(c1)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.sqrt"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.sqrt"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, math.sqrt(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.add"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.add"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, c1 + c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.sub"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.sub"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, c1 - c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.mul"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.mul"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, c1 * c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.div"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn 
@"f32.div"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); self.pushOperandNoCheck(f32, c1 / c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.min"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.min"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); if (math.isNan(c1)) { self.pushOperandNoCheck(f32, math.nan_f32); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (math.isNan(c2)) { self.pushOperandNoCheck(f32, math.nan_f32); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (c1 == 0.0 and c2 == 0.0) { @@ -1562,20 +1663,20 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f32, @min(c1, c2)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.max"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.max"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); if (math.isNan(c1)) { self.pushOperandNoCheck(f32, math.nan_f32); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (math.isNan(c2)) { self.pushOperandNoCheck(f32, math.nan_f32); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (c1 == 0.0 and c2 == 0.0) { @@ -1588,10 +1689,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f32, @max(c1, c2)); } - return dispatch(self, ip + 1, code); + return 
dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.copysign"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.copysign"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f32); const c1 = self.popOperand(f32); @@ -1601,50 +1702,50 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f32, math.fabs(c1)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.abs"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.abs"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, math.fabs(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.neg"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.neg"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, -c1); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.ceil"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.ceil"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, @ceil(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.floor"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.floor"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, 
@floor(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.trunc"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.trunc"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, @trunc(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.nearest"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.nearest"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f64); const floor = @floor(c1); const ceil = @ceil(c1); @@ -1659,60 +1760,60 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f64, @round(c1)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.sqrt"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.sqrt"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, math.sqrt(c1)); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.add"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.add"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, c1 + c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.sub"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.sub"(self: *VirtualMachine, ip: usize, imm: usize, instructions: 
[]Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, c1 - c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.mul"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.mul"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, c1 * c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.div"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.div"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); self.pushOperandNoCheck(f64, c1 / c2); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.min"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.min"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); if (math.isNan(c1) or math.isNan(c2)) { self.pushOperandNoCheck(f64, math.nan_f64); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (c1 == 0.0 and c2 == 0.0) { @@ -1725,16 +1826,16 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f64, @min(c1, c2)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.max"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.max"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: 
[]u32) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); if (math.isNan(c1) or math.isNan(c2)) { self.pushOperandNoCheck(f64, math.nan_f64); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (c1 == 0.0 and c2 == 0.0) { @@ -1747,10 +1848,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f64, @max(c1, c2)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.copysign"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.copysign"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c2 = self.popOperand(f64); const c1 = self.popOperand(f64); @@ -1760,18 +1861,18 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(f64, math.fabs(c1)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.wrap_i64"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.wrap_i64"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(i32, @as(i32, @truncate(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.trunc_f32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.trunc_f32_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f32); if (math.isNan(c1)) return error.InvalidConversion; @@ -1783,10 +1884,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i32, @as(i32, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.trunc_f32_u"(self: 
*VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.trunc_f32_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f32); if (math.isNan(c1)) return error.InvalidConversion; @@ -1798,10 +1899,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u32, @as(u32, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.trunc_f64_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.trunc_f64_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f64); if (math.isNan(c1)) return error.InvalidConversion; @@ -1813,10 +1914,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i32, @as(i32, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.trunc_f64_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.trunc_f64_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f64); if (math.isNan(c1)) return error.InvalidConversion; @@ -1828,26 +1929,26 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u32, @as(u32, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.extend_i32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.extend_i32_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(i64, @as(i32, @truncate(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, 
immediates); } - fn @"i64.extend_i32_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.extend_i32_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(u64, @as(u32, @truncate(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.trunc_f32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.trunc_f32_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f32); if (math.isNan(c1)) return error.InvalidConversion; @@ -1859,10 +1960,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i64, @as(i64, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.trunc_f32_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.trunc_f32_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f32); if (math.isNan(c1)) return error.InvalidConversion; @@ -1874,10 +1975,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, @as(u64, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.trunc_f64_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.trunc_f64_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f64); if (math.isNan(c1)) return error.InvalidConversion; @@ -1889,10 +1990,10 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i64, @as(i64, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return 
dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.trunc_f64_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.trunc_f64_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f64); if (math.isNan(c1)) return error.InvalidConversion; @@ -1904,168 +2005,168 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, @as(u64, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.convert_i32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.convert_i32_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(i32); self.pushOperandNoCheck(f32, @as(f32, @floatFromInt(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.convert_i32_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.convert_i32_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(f32, @as(f32, @floatFromInt(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.convert_i64_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.convert_i64_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(f32, @as(f32, @floatFromInt(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.convert_i64_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn 
@"f32.convert_i64_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(f32, @as(f32, @floatFromInt(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.demote_f64"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.demote_f64"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(f32, @as(f32, @floatCast(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.convert_i32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.convert_i32_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(i32); self.pushOperandNoCheck(f64, @as(f64, @floatFromInt(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.convert_i32_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.convert_i32_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(u32); self.pushOperandNoCheck(f64, @as(f64, @floatFromInt(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.convert_i64_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.convert_i64_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(f64, @as(f64, @floatFromInt(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, 
instructions, immediates); } - fn @"f64.convert_i64_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.convert_i64_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(u64); self.pushOperandNoCheck(f64, @as(f64, @floatFromInt(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.promote_f32"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.promote_f32"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(f64, @as(f64, @floatCast(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.reinterpret_f32"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.reinterpret_f32"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f32); self.pushOperandNoCheck(i32, @as(i32, @bitCast(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.reinterpret_f64"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.reinterpret_f64"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f64); self.pushOperandNoCheck(i64, @as(i64, @bitCast(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f32.reinterpret_i32"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f32.reinterpret_i32"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(i32); 
self.pushOperandNoCheck(f32, @as(f32, @bitCast(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"f64.reinterpret_i64"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"f64.reinterpret_i64"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(f64, @as(f64, @bitCast(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.extend8_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.extend8_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(i32); self.pushOperandNoCheck(i32, @as(i8, @truncate(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.extend16_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.extend16_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(i32); self.pushOperandNoCheck(i32, @as(i16, @truncate(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.extend8_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.extend8_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(i64, @as(i8, @truncate(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.extend16_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.extend16_s"(self: *VirtualMachine, ip: usize, imm: usize, 
instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(i64, @as(i16, @truncate(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.extend32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.extend32_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(i64); self.pushOperandNoCheck(i64, @as(i32, @truncate(c1))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"ref.null"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"ref.null"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { self.pushOperandNoCheck(u64, REF_NULL); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"ref.is_null"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"ref.is_null"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const value = self.popOperand(u64); if (value == REF_NULL) { @@ -2074,21 +2175,21 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(u64, 0); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"ref.func"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const funcidx = code[ip].@"ref.func"; + pub fn @"ref.func"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const funcidx = immediates[imm]; const ref = self.inst.funcaddrs.items[funcidx]; // Not sure about this at all, this could still coincidentally be zero? 
self.pushOperandNoCheck(u64, ref); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } - fn misc(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - return miscDispatch(self, ip, code); + pub fn misc(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + return miscDispatch(self, ip, imm, instructions, immediates); } const misc_lookup = [18]InstructionFunction{ @@ -2096,203 +2197,206 @@ pub const VirtualMachine = struct { @"table.size", @"table.fill", }; - inline fn miscDispatch(self: *VirtualMachine, next_ip: usize, code: []Rr) WasmError!void { - const next_instr = code[next_ip].misc; + inline fn miscDispatch(self: *VirtualMachine, next_ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const next_instr = immediates[imm]; + const next_offset = self.inst.module.immediates_offset.items[next_instr]; - return try @call(.always_tail, misc_lookup[@intFromEnum(next_instr)], .{ self, next_ip, code }); + return try @call(.always_tail, misc_lookup[next_instr], .{ self, next_ip, next_offset, instructions, immediates }); } - fn @"i32.trunc_sat_f32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.trunc_sat_f32_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f32); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(i32, 0); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (trunc >= @as(f32, @floatFromInt(math.maxInt(i32)))) { self.pushOperandNoCheck(i32, @as(i32, @bitCast(@as(u32, 0x7fffffff)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (trunc < @as(f32, @floatFromInt(math.minInt(i32)))) { self.pushOperandNoCheck(i32, @as(i32, @bitCast(@as(u32, 
0x80000000)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } self.pushOperandNoCheck(i32, @as(i32, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.trunc_sat_f32_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.trunc_sat_f32_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f32); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(u32, 0); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (trunc >= @as(f32, @floatFromInt(math.maxInt(u32)))) { self.pushOperandNoCheck(u32, @as(u32, @bitCast(@as(u32, 0xffffffff)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (trunc < @as(f32, @floatFromInt(math.minInt(u32)))) { self.pushOperandNoCheck(u32, @as(u32, @bitCast(@as(u32, 0x00000000)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } self.pushOperandNoCheck(u32, @as(u32, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.trunc_sat_f64_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.trunc_sat_f64_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f64); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(i32, 0); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (trunc >= @as(f64, @floatFromInt(math.maxInt(i32)))) { self.pushOperandNoCheck(i32, @as(i32, @bitCast(@as(u32, 0x7fffffff)))); - return dispatch(self, ip + 1, code); 
+ return dispatch(self, ip + 1, imm, instructions, immediates); } if (trunc < @as(f64, @floatFromInt(math.minInt(i32)))) { self.pushOperandNoCheck(i32, @as(i32, @bitCast(@as(u32, 0x80000000)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } self.pushOperandNoCheck(i32, @as(i32, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i32.trunc_sat_f64_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i32.trunc_sat_f64_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f64); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(u32, 0); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (trunc >= @as(f64, @floatFromInt(math.maxInt(u32)))) { self.pushOperandNoCheck(u32, @as(u32, @bitCast(@as(u32, 0xffffffff)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (trunc < @as(f64, @floatFromInt(math.minInt(u32)))) { self.pushOperandNoCheck(u32, @as(u32, @bitCast(@as(u32, 0x00000000)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } self.pushOperandNoCheck(u32, @as(u32, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.trunc_sat_f32_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.trunc_sat_f32_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f32); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(i64, 0); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, 
immediates); } if (trunc >= @as(f32, @floatFromInt(math.maxInt(i64)))) { self.pushOperandNoCheck(i64, @as(i64, @bitCast(@as(u64, 0x7fffffffffffffff)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (trunc < @as(f32, @floatFromInt(math.minInt(i64)))) { self.pushOperandNoCheck(i64, @as(i64, @bitCast(@as(u64, 0x8000000000000000)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } self.pushOperandNoCheck(i64, @as(i64, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.trunc_sat_f32_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.trunc_sat_f32_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f32); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(u64, 0); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (trunc >= @as(f32, @floatFromInt(math.maxInt(u64)))) { self.pushOperandNoCheck(u64, @as(u64, @bitCast(@as(u64, 0xffffffffffffffff)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (trunc < @as(f32, @floatFromInt(math.minInt(u64)))) { self.pushOperandNoCheck(u64, @as(u64, @bitCast(@as(u64, 0x0000000000000000)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } self.pushOperandNoCheck(u64, @as(u64, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.trunc_sat_f64_s"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.trunc_sat_f64_s"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) 
WasmError!void { const c1 = self.popOperand(f64); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(i64, 0); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (trunc >= @as(f64, @floatFromInt(math.maxInt(i64)))) { self.pushOperandNoCheck(i64, @as(i64, @bitCast(@as(u64, 0x7fffffffffffffff)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (trunc < @as(f64, @floatFromInt(math.minInt(i64)))) { self.pushOperandNoCheck(i64, @as(i64, @bitCast(@as(u64, 0x8000000000000000)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } self.pushOperandNoCheck(i64, @as(i64, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn @"i64.trunc_sat_f64_u"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"i64.trunc_sat_f64_u"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { const c1 = self.popOperand(f64); const trunc = @trunc(c1); if (math.isNan(c1)) { self.pushOperandNoCheck(u64, 0); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (trunc >= @as(f64, @floatFromInt(math.maxInt(u64)))) { self.pushOperandNoCheck(u64, @as(u64, @bitCast(@as(u64, 0xffffffffffffffff)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } if (trunc < @as(f64, @floatFromInt(math.minInt(u64)))) { self.pushOperandNoCheck(u64, @as(u64, @bitCast(@as(u64, 0x0000000000000000)))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } self.pushOperandNoCheck(u64, @as(u64, @intFromFloat(trunc))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm, instructions, immediates); } - fn 
@"memory.init"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].misc.@"memory.init"; + pub fn @"memory.init"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + // const meta = code[ip].misc.@"memory.init"; + const memidx = immediates[imm]; + const dataidx = immediates[imm + 1]; const n = self.popOperand(u32); const src = self.popOperand(u32); const dest = self.popOperand(u32); - const memory = try self.inst.getMemory(meta.memidx); + const memory = try self.inst.getMemory(memidx); const mem_size = memory.sizeBytes(); - const data = try self.inst.getData(meta.dataidx); + const data = try self.inst.getData(dataidx); if (@as(u33, src) + @as(u33, n) > data.data.len) return error.OutOfBoundsMemoryAccess; if (@as(u33, dest) + @as(u33, n) > mem_size) return error.OutOfBoundsMemoryAccess; if (n == 0) { - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } if (data.dropped) return error.OutOfBoundsMemoryAccess; @@ -2302,18 +2406,25 @@ pub const VirtualMachine = struct { try memory.write(u8, 0, dest + i, data.data[src + i]); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"data.drop"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const dataidx = code[ip].misc.@"data.drop"; + pub fn @"data.drop"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + // const dataidx = code[ip].misc.@"data.drop"; + const dataidx = immediates[imm]; const data = try self.inst.getData(dataidx); data.dropped = true; - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } - fn @"memory.copy"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"memory.copy"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, 
immediates: []u32) WasmError!void { + // FIXME: use these when we support multiple memories + const src_memidx = immediates[imm]; + _ = src_memidx; + const dst_memidx = immediates[imm + 1]; + _ = dst_memidx; + const n = self.popOperand(u32); const src = self.popOperand(u32); const dest = self.popOperand(u32); @@ -2325,7 +2436,7 @@ pub const VirtualMachine = struct { if (@as(u33, dest) + @as(u33, n) > mem_size) return error.OutOfBoundsMemoryAccess; if (n == 0) { - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } // FIXME: move initial bounds check into Memory implementation @@ -2336,10 +2447,12 @@ pub const VirtualMachine = struct { memory.uncheckedCopyBackwards(dest, data[src .. src + n]); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"memory.fill"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { + pub fn @"memory.fill"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + _ = immediates[imm]; + const n = self.popOperand(u32); const value = self.popOperand(u32); const dest = self.popOperand(u32); @@ -2349,18 +2462,18 @@ pub const VirtualMachine = struct { if (@as(u33, dest) + @as(u33, n) > mem_size) return error.OutOfBoundsMemoryAccess; if (n == 0) { - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } memory.uncheckedFill(dest, n, @as(u8, @truncate(value))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } - fn @"table.init"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].misc.@"table.init"; - const tableidx = meta.tableidx; - const elemidx = meta.elemidx; + pub fn @"table.init"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + // const meta = 
code[ip].misc.@"table.init"; + const tableidx = immediates[imm]; + const elemidx = immediates[imm + 1]; const table = try self.inst.getTable(tableidx); const elem = try self.inst.getElem(elemidx); @@ -2383,24 +2496,26 @@ pub const VirtualMachine = struct { try table.set(d + i, elem.elem[s + i]); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"elem.drop"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].misc.@"elem.drop"; - const elemidx = meta.elemidx; + pub fn @"elem.drop"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + // const meta = code[ip].misc.@"elem.drop"; + const elemidx = immediates[imm]; const elem = try self.inst.getElem(elemidx); elem.dropped = true; - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } - fn @"table.copy"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].misc.@"table.copy"; - const dest_tableidx = meta.dest_tableidx; - const src_tableidx = meta.src_tableidx; + pub fn @"table.copy"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + // const meta = code[ip].misc.@"table.copy"; + // const dest_tableidx = meta.dest_tableidx; + // const src_tableidx = meta.src_tableidx; + const dst_tableidx = immediates[imm]; + const src_tableidx = immediates[imm + 1]; - const dest_table = try self.inst.getTable(dest_tableidx); + const dest_table = try self.inst.getTable(dst_tableidx); const src_table = try self.inst.getTable(src_tableidx); const n = self.popOperand(u32); @@ -2426,12 +2541,11 @@ pub const VirtualMachine = struct { } } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 2, instructions, immediates); } - fn @"table.grow"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = 
code[ip].misc.@"table.grow"; - const tableidx = meta.tableidx; + pub fn @"table.grow"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const tableidx = immediates[imm]; const table = try self.inst.getTable(tableidx); @@ -2448,23 +2562,21 @@ pub const VirtualMachine = struct { self.pushOperandNoCheck(i32, @as(i32, -1)); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } - fn @"table.size"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].misc.@"table.size"; - const tableidx = meta.tableidx; + pub fn @"table.size"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const tableidx = immediates[imm]; const table = try self.inst.getTable(tableidx); self.pushOperandNoCheck(u32, @as(u32, @intCast(table.size()))); - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } - fn @"table.fill"(self: *VirtualMachine, ip: usize, code: []Rr) WasmError!void { - const meta = code[ip].misc.@"table.fill"; - const tableidx = meta.tableidx; + pub fn @"table.fill"(self: *VirtualMachine, ip: usize, imm: usize, instructions: []Instruction, immediates: []u32) WasmError!void { + const tableidx = immediates[imm]; const table = try self.inst.getTable(tableidx); @@ -2482,7 +2594,7 @@ pub const VirtualMachine = struct { try table.set(d + i, ref); } - return dispatch(self, ip + 1, code); + return dispatch(self, ip + 1, imm + 1, instructions, immediates); } // https://webassembly.github.io/spec/core/exec/instructions.html#xref-syntax-instructions-syntax-instr-control-mathsf-br-l @@ -2556,11 +2668,11 @@ pub const VirtualMachine = struct { return self.op_stack[self.op_ptr - 1]; } - fn peekOperand(self: *VirtualMachine) u64 { + pub fn peekOperand(self: *VirtualMachine) u64 { return self.op_stack[self.op_ptr - 1]; } - fn 
peekNthOperand(self: *VirtualMachine, index: u32) u64 { + pub fn peekNthOperand(self: *VirtualMachine, index: u32) u64 { return self.op_stack[self.op_ptr - index - 1]; } @@ -2584,7 +2696,7 @@ pub const VirtualMachine = struct { return self.frame_stack[self.frame_ptr - 1]; } - fn peekFrame(self: *VirtualMachine) *Frame { + pub fn peekFrame(self: *VirtualMachine) *Frame { return &self.frame_stack[self.frame_ptr - 1]; } @@ -2606,7 +2718,7 @@ pub const VirtualMachine = struct { // // Returns nth label on the Label stack relative to the top of the stack // - fn peekNthLabel(self: *VirtualMachine, index: u32) *Label { + pub fn peekNthLabel(self: *VirtualMachine, index: u32) *Label { return &self.label_stack[self.label_ptr - index - 1]; } diff --git a/src/module.zig b/src/module.zig index d41584f7..fbd4a181 100644 --- a/src/module.zig +++ b/src/module.zig @@ -4,6 +4,7 @@ const leb = std.leb; const math = std.math; const unicode = std.unicode; const ArrayList = std.ArrayList; +const VirtualMachine = @import("instance/vm.zig").VirtualMachine; const Rr = @import("rr.zig").Rr; const RrOpcode = @import("rr.zig").RrOpcode; const Instance = @import("instance.zig").Instance; @@ -34,7 +35,10 @@ pub const Module = struct { function_index_start: ?usize = null, data_count: ?u32 = null, element_init_offsets: ArrayList(usize), - parsed_code: ArrayList(Rr), + // parsed_code: ArrayList(Rr), + instructions: ArrayList(VirtualMachine.InstructionFunction), + immediates_offset: ArrayList(u32), + immediates: ArrayList(u32), local_types: ArrayList(LocalType), br_table_indices: ArrayList(u32), references: ArrayList(u32), @@ -56,7 +60,10 @@ pub const Module = struct { .codes = Section(Code).init(alloc), .datas = Section(DataSegment).init(alloc), .element_init_offsets = ArrayList(usize).init(alloc), - .parsed_code = ArrayList(Rr).init(alloc), + // .parsed_code = ArrayList(Rr).init(alloc), + .instructions = ArrayList(VirtualMachine.InstructionFunction).init(alloc), + .immediates_offset = 
ArrayList(u32).init(alloc), + .immediates = ArrayList(u32).init(alloc), .local_types = ArrayList(LocalType).init(alloc), .br_table_indices = ArrayList(u32).init(alloc), .references = ArrayList(u32).init(alloc), @@ -77,7 +84,10 @@ pub const Module = struct { self.datas.deinit(); self.element_init_offsets.deinit(); - self.parsed_code.deinit(); + // self.parsed_code.deinit(); + self.instructions.deinit(); + self.immediates_offset.deinit(); + self.immediates.deinit(); self.local_types.deinit(); self.br_table_indices.deinit(); self.references.deinit(); @@ -96,7 +106,8 @@ pub const Module = struct { // Push an initial return instruction so we don't have to // track the end of a function to use its return on invoke // See https://github.com/malcolmstill/zware/pull/133 - try self.parsed_code.append(.@"return"); + try self.instructions.append(VirtualMachine.@"return"); + try self.immediates_offset.append(0); var i: usize = 0; while (true) : (i += 1) { @@ -474,9 +485,11 @@ pub const Module = struct { try self.references.append(funcidx); - const init_offset = self.parsed_code.items.len; - try self.parsed_code.append(Rr{ .@"ref.func" = funcidx }); - try self.parsed_code.append(Rr.@"return"); + const init_offset = self.instructions.items.len; + // try self.parsed_code.append(Rr{ .@"ref.func" = funcidx }); + try self.instructions.append(VirtualMachine.@"ref.func"); + // try self.parsed_code.append(Rr.@"return"); + try self.instructions.append(VirtualMachine.@"return"); try self.element_init_offsets.append(init_offset); } @@ -505,9 +518,11 @@ pub const Module = struct { try self.references.append(funcidx); - const init_offset = self.parsed_code.items.len; - try self.parsed_code.append(Rr{ .@"ref.func" = funcidx }); - try self.parsed_code.append(Rr.@"return"); + const init_offset = self.instructions.items.len; + // try self.parsed_code.append(Rr{ .@"ref.func" = funcidx }); + try self.instructions.append(VirtualMachine.@"ref.func"); + // try self.parsed_code.append(Rr.@"return"); + 
try self.instructions.append(VirtualMachine.@"return"); try self.element_init_offsets.append(init_offset); } @@ -538,9 +553,11 @@ pub const Module = struct { try self.references.append(funcidx); - const init_offset = self.parsed_code.items.len; - try self.parsed_code.append(Rr{ .@"ref.func" = funcidx }); - try self.parsed_code.append(Rr.@"return"); + const init_offset = self.instructions.items.len; + // try self.parsed_code.append(Rr{ .@"ref.func" = funcidx }); + try self.instructions.append(VirtualMachine.@"ref.func"); + // try self.parsed_code.append(Rr.@"return"); + try self.instructions.append(VirtualMachine.@"return"); try self.element_init_offsets.append(init_offset); } @@ -568,9 +585,11 @@ pub const Module = struct { try self.references.append(funcidx); - const init_offset = self.parsed_code.items.len; - try self.parsed_code.append(Rr{ .@"ref.func" = funcidx }); - try self.parsed_code.append(Rr.@"return"); + const init_offset = self.instructions.items.len; + // try self.parsed_code.append(Rr{ .@"ref.func" = funcidx }); + try self.instructions.append(VirtualMachine.@"ref.func"); + // try self.parsed_code.append(Rr.@"return"); + try self.instructions.append(VirtualMachine.@"return"); try self.element_init_offsets.append(init_offset); } @@ -615,7 +634,7 @@ pub const Module = struct { var j: usize = 0; while (j < expr_count) : (j += 1) { - const init_offset = self.parsed_code.items.len; + const init_offset = self.instructions.items.len; _ = try self.readConstantExpression(.FuncRef); try self.element_init_offsets.append(init_offset); } @@ -662,7 +681,8 @@ pub const Module = struct { const count = try self.readULEB128(u32); self.codes.count = count; - try self.parsed_code.ensureTotalCapacity(count * 32); + // FIXME: better heuristic + also ensure immediates_offset + immediates + try self.instructions.ensureTotalCapacity(count * 32); if (count == 0) return; diff --git a/src/module/parser.zig b/src/module/parser.zig index 0c8f8d24..d3a5ead6 100644 --- 
a/src/module/parser.zig +++ b/src/module/parser.zig @@ -11,14 +11,17 @@ const Type = @import("validator.zig").Type; const ValType = @import("../valtype.zig").ValType; const RefType = @import("../valtype.zig").RefType; const Range = @import("../rr.zig").Range; -const Rr = @import("../rr.zig").Rr; +const RrOpcode = @import("../rr.zig").RrOpcode; const MiscRr = @import("../rr.zig").MiscRr; +const VirtualMachine = @import("../instance/vm.zig").VirtualMachine; pub const Parsed = struct { start: usize, max_depth: usize, }; +const ContinuationStackEntry = struct { offset: usize, opcode: RrOpcode }; + pub const Parser = struct { function: []const u8 = undefined, code: []const u8 = undefined, @@ -27,14 +30,14 @@ pub const Parser = struct { validator: Validator = undefined, params: ?[]const ValType, locals: ?[]LocalType, - continuation_stack: [1024]usize = [_]usize{0} ** 1024, + continuation_stack: [1024]ContinuationStackEntry = undefined, continuation_stack_ptr: usize, is_constant: bool = false, scope: usize, pub fn init(module: *Module) Parser { return Parser{ - .code_ptr = module.parsed_code.items.len, + .code_ptr = module.instructions.items.len, .module = module, .params = null, .locals = null, @@ -53,19 +56,21 @@ pub const Parser = struct { self.function = code; self.code = code; - const code_start = self.module.parsed_code.items.len; + const code_start = self.module.instructions.items.len; try self.pushFunction(locals, funcidx); while (try self.next()) |instr| { - try self.module.parsed_code.append(instr); + _ = instr; + // try self.module.instructions.append(VirtualMachine.lookup[@intFromEnum(instr)]); } const bytes_read = self.bytesRead(); _ = try self.module.readSlice(bytes_read); + // FIXME: might setting .block in ControlFrame mean we don't have to replace final end with return? 
// Patch last end so that it is return - self.module.parsed_code.items[self.module.parsed_code.items.len - 1] = .@"return"; + self.module.instructions.items[self.module.instructions.items.len - 1] = VirtualMachine.@"return"; return Parsed{ .start = code_start, .max_depth = self.validator.max_depth }; } @@ -76,7 +81,7 @@ pub const Parser = struct { self.function = code; self.code = code; - const code_start = self.module.parsed_code.items.len; + const code_start = self.module.instructions.items.len; const in: [0]ValType = [_]ValType{} ** 0; const out: [1]ValType = [_]ValType{valtype} ** 1; @@ -100,14 +105,15 @@ pub const Parser = struct { => {}, else => return error.ValidatorConstantExpressionRequired, } - try self.module.parsed_code.append(instr); + // try self.module.instructions.append(VirtualMachine.lookup[@intFromEnum(instr)]); } const bytes_read = self.bytesRead(); _ = try self.module.readSlice(bytes_read); + // FIXME: might setting .block in ControlFrame mean we don't have to replace final end with return? // Patch last end so that it is return - self.module.parsed_code.items[self.module.parsed_code.items.len - 1] = .@"return"; + self.module.instructions.items[self.module.instructions.items.len - 1] = VirtualMachine.@"return"; return Parsed{ .start = code_start, .max_depth = self.validator.max_depth }; } @@ -121,25 +127,26 @@ pub const Parser = struct { self.locals = locals; try self.validator.pushControlFrame( + // FIXME: might setting this as block mean we don't have to replace final end with return? .nop, // block? 
functype.params[0..0], functype.results, ); } - fn pushContinuationStack(self: *Parser, offset: usize) !void { + fn pushContinuationStack(self: *Parser, offset: usize, opcode: RrOpcode) !void { defer self.continuation_stack_ptr += 1; if (self.continuation_stack_ptr >= self.continuation_stack.len) return error.ContinuationStackOverflow; - self.continuation_stack[self.continuation_stack_ptr] = offset; + self.continuation_stack[self.continuation_stack_ptr] = .{ .offset = offset, .opcode = opcode }; } - fn peekContinuationStack(self: *Parser) !usize { + fn peekContinuationStack(self: *Parser) !ContinuationStackEntry { if (self.continuation_stack_ptr <= 0) return error.ContinuationStackUnderflow; // No test covering this return self.continuation_stack[self.continuation_stack_ptr - 1]; } - fn popContinuationStack(self: *Parser) !usize { + fn popContinuationStack(self: *Parser) !ContinuationStackEntry { if (self.continuation_stack_ptr <= 0) return error.ContinuationStackUnderflow; self.continuation_stack_ptr -= 1; @@ -150,7 +157,7 @@ pub const Parser = struct { return self.function.len - self.code.len; } - pub fn next(self: *Parser) !?Rr { + pub fn next(self: *Parser) !?Opcode { defer self.code_ptr += 1; if (self.scope > 0 and self.code.len == 0) return error.CouldntFindEnd; @@ -161,12 +168,16 @@ pub const Parser = struct { const instr = std.meta.intToEnum(Opcode, self.code[0]) catch return error.IllegalOpcode; self.code = self.code[1..]; - var rr: Rr = undefined; + // Record the start of this instruction's immediates + const immediates_count = std.math.cast(u32, self.module.immediates.items.len) orelse return error.TooManyImmediates; // FIXME: error or assert + try self.module.immediates_offset.append(immediates_count); + + // std.debug.print("instr[{}] = {}, immediate offset = {}\n", .{ self.code_ptr, instr, immediates_count }); // 2. 
Find the start of the next instruction switch (instr) { - .@"unreachable" => rr = Rr.@"unreachable", - .nop => rr = Rr.nop, + .@"unreachable" => {}, + .nop => {}, .block => { const block_type = try self.readILEB128Mem(i32); @@ -195,16 +206,20 @@ pub const Parser = struct { } } - try self.pushContinuationStack(self.code_ptr); + try self.pushContinuationStack(self.code_ptr, .block); self.scope += 1; - rr = Rr{ - .block = .{ - .param_arity = block_params, - .return_arity = block_returns, - .branch_target = 0, - }, - }; + try self.module.immediates.append(block_params); + try self.module.immediates.append(block_returns); + try self.module.immediates.append(0); + + // Rr{ + // .block = .{ + // .param_arity = block_params, + // .return_arity = block_returns, + // .branch_target = 0, + // }, + // }; }, .loop => { const block_type = try self.readILEB128Mem(i32); @@ -233,16 +248,20 @@ pub const Parser = struct { } } - try self.pushContinuationStack(self.code_ptr); + try self.pushContinuationStack(self.code_ptr, .loop); self.scope += 1; - rr = Rr{ - .loop = .{ - .param_arity = block_params, - .return_arity = block_params, - .branch_target = math.cast(u32, self.code_ptr) orelse return error.FailedCast, - }, - }; + try self.module.immediates.append(block_params); + try self.module.immediates.append(block_params); + try self.module.immediates.append(math.cast(u32, self.code_ptr) orelse return error.FailedCast); + + // rr = Rr{ + // .loop = .{ + // .param_arity = block_params, + // .return_arity = block_params, + // .branch_target = math.cast(u32, self.code_ptr) orelse return error.FailedCast, + // }, + // }; }, .@"if" => { const block_type = try self.readILEB128Mem(i32); @@ -276,72 +295,88 @@ pub const Parser = struct { } } - try self.pushContinuationStack(self.code_ptr); + try self.pushContinuationStack(self.code_ptr, .@"if"); self.scope += 1; - rr = Rr{ - .if_no_else = .{ - .param_arity = block_params, - .return_arity = block_returns, - .branch_target = 0, - }, - }; + try 
self.module.immediates.append(block_params); + try self.module.immediates.append(block_returns); + try self.module.immediates.append(0); + // an if with no else only has 3 immediates, but we push a fourth here + // so we can exchange the if with an if_with_else + try self.module.immediates.append(0); + + // FIXME: we have found an if, but we were actually pushing an if_no_else + // i.e. we assume we don't have an else until we find one (and if we + // do we replace the if_no_else with a plain if). We could turn this + // around, so that e.g. if means if-no-else and then have a if-with-else + // instruction + // + // rr = Rr{ + // .if_no_else = .{ + // .param_arity = block_params, + // .return_arity = block_returns, + // .branch_target = 0, + // }, + // }; }, .@"else" => { - const parsed_code_offset = try self.peekContinuationStack(); - - switch (self.module.parsed_code.items[parsed_code_offset]) { - .if_no_else => |*b| { - self.module.parsed_code.items[parsed_code_offset] = Rr{ - .@"if" = .{ - .param_arity = b.param_arity, - .return_arity = b.return_arity, - .branch_target = 0, - .else_ip = math.cast(u32, self.code_ptr + 1) orelse return error.FailedCast, - }, - }; + const pushed_instruction = try self.peekContinuationStack(); + const immediates_offset = self.module.immediates_offset.items[pushed_instruction.offset]; + + switch (pushed_instruction.opcode) { + .@"if" => { + self.module.immediates.items[immediates_offset + 3] = math.cast(u32, self.code_ptr + 1) orelse return error.FailedCast; + self.module.instructions.items[pushed_instruction.offset] = VirtualMachine.if_with_else; }, else => return error.UnexpectedInstruction, } - rr = Rr.@"else"; + // rr = Rr.@"else"; }, .end => { self.scope -= 1; // If we're not looking at the `end` of a function if (self.scope != 0) { - const parsed_code_offset = try self.popContinuationStack(); + const pushed_instruction = try self.popContinuationStack(); + const immediate_offset = 
self.module.immediates_offset.items[pushed_instruction.offset]; + // std.debug.print("instr[{}]: end immediate_offset = {}\n", .{ pushed_instruction.offset, immediate_offset }); - switch (self.module.parsed_code.items[parsed_code_offset]) { - .block => |*b| b.branch_target = math.cast(u32, self.code_ptr + 1) orelse return error.FailedCast, + switch (pushed_instruction.opcode) { + .block => self.module.immediates.items[immediate_offset + 2] = math.cast(u32, self.code_ptr + 1) orelse return error.FailedCast, .loop => {}, - .@"if" => |*b| { - b.branch_target = math.cast(u32, self.code_ptr + 1) orelse return error.FailedCast; + .if_with_else => { + self.module.immediates.items[immediate_offset + 2] = math.cast(u32, self.code_ptr + 1) orelse return error.FailedCast; }, - .if_no_else => |*b| { + .@"if" => { + const param_arity = self.module.immediates.items[immediate_offset]; + const return_arity = self.module.immediates.items[immediate_offset + 1]; // We have an if with no else, check that this works arity-wise and replace with fast if - if (b.param_arity -% b.return_arity != 0) return error.ValidatorElseBranchExpected; + if (param_arity -% return_arity != 0) return error.ValidatorElseBranchExpected; - b.branch_target = math.cast(u32, self.code_ptr + 1) orelse return error.FailedCast; + self.module.immediates.items[immediate_offset + 2] = math.cast(u32, self.code_ptr + 1) orelse return error.FailedCast; }, else => return error.UnexpectedInstruction, } } - rr = Rr.end; + // rr = Rr.end; }, .br => { const label = try self.readULEB128Mem(u32); try self.validator.validateBr(label); - rr = Rr{ .br = label }; + + try self.module.immediates.append(label); + // rr = Rr{ .br = label }; }, .br_if => { const label = try self.readULEB128Mem(u32); try self.validator.validateBrIf(label); - rr = Rr{ .br_if = label }; + + try self.module.immediates.append(label); + // rr = Rr{ .br_if = label }; }, .br_table => { - const label_start = self.module.br_table_indices.items.len; + const 
label_start = math.cast(u32, self.module.br_table_indices.items.len) orelse return error.TooManyBrTableIndices; const label_count = try self.readULEB128Mem(u32); var j: usize = 0; @@ -354,14 +389,18 @@ pub const Parser = struct { try self.validator.validateBrTable(l_star, ln); - rr = Rr{ - .br_table = .{ - .ls = Range{ .offset = label_start, .count = label_count }, - .ln = ln, - }, - }; + try self.module.immediates.append(label_start); + try self.module.immediates.append(label_count); + try self.module.immediates.append(ln); + + // rr = Rr{ + // .br_table = .{ + // .ls = Range{ .offset = label_start, .count = label_count }, + // .ln = ln, + // }, + // }; }, - .@"return" => rr = Rr.@"return", + .@"return" => {}, .call => { const funcidx = try self.readULEB128Mem(u32); const func = try self.module.functions.lookup(funcidx); @@ -369,7 +408,15 @@ pub const Parser = struct { try self.validator.validateCall(functype); - rr = Rr{ .call = funcidx }; + try self.module.immediates.append(funcidx); + // To allow swapping out a .call with a .fast_call we need enough space for all .fast_call immediates: + try self.module.immediates.append(0); + try self.module.immediates.append(0); + try self.module.immediates.append(0); + try self.module.immediates.append(0); + try self.module.immediates.append(0); + + // rr = Rr{ .call = funcidx }; // TODO: do the replacement at instantiate-time for a fastcall if in same module? 
// rr = Rr{ .fast_call = .{ .ip_start = 0, .params = 1, .locals = 0, .results = 1 } }; }, @@ -382,15 +429,18 @@ pub const Parser = struct { try self.validator.validateCallIndirect(functype); - rr = Rr{ - .call_indirect = .{ - .typeidx = typeidx, - .tableidx = tableidx, - }, - }; + try self.module.immediates.append(typeidx); + try self.module.immediates.append(tableidx); + + // rr = Rr{ + // .call_indirect = .{ + // .typeidx = typeidx, + // .tableidx = tableidx, + // }, + // }; }, - .drop => rr = Rr.drop, - .select => rr = Rr.select, + .drop => {}, + .select => {}, .select_t => { const type_count = try self.readULEB128Mem(u32); if (type_count != 1) return error.OnlyOneSelectTTypeSupported; // Future versions may support more than one @@ -398,8 +448,6 @@ pub const Parser = struct { const valuetype = try std.meta.intToEnum(ValType, valuetype_raw); try self.validator.validateSelectT(valuetype); - - rr = Rr.select; }, .@"global.get" => { const globalidx = try self.readULEB128Mem(u32); @@ -407,7 +455,9 @@ pub const Parser = struct { try self.validator.validateGlobalGet(global); - rr = Rr{ .@"global.get" = globalidx }; + try self.module.immediates.append(globalidx); + + // rr = Rr{ .@"global.get" = globalidx }; }, .@"global.set" => { const globalidx = try self.readULEB128Mem(u32); @@ -415,7 +465,8 @@ pub const Parser = struct { try self.validator.validateGlobalSet(global); - rr = Rr{ .@"global.set" = globalidx }; + try self.module.immediates.append(globalidx); + // rr = Rr{ .@"global.set" = globalidx }; }, .@"table.get" => { const tableidx = try self.readULEB128Mem(u32); @@ -429,7 +480,8 @@ pub const Parser = struct { _ = try self.validator.popOperandExpecting(Type{ .Known = .I32 }); _ = try self.validator.pushOperand(Type{ .Known = reftype }); - rr = Rr{ .@"table.get" = tableidx }; + try self.module.immediates.append(tableidx); + // rr = Rr{ .@"table.get" = tableidx }; }, .@"table.set" => { const tableidx = try self.readULEB128Mem(u32); @@ -443,7 +495,8 @@ pub const 
Parser = struct { _ = try self.validator.popOperandExpecting(Type{ .Known = reftype }); _ = try self.validator.popOperandExpecting(Type{ .Known = .I32 }); - rr = Rr{ .@"table.set" = tableidx }; + try self.module.immediates.append(tableidx); + // rr = Rr{ .@"table.set" = tableidx }; }, .@"local.get" => { const localidx = try self.readULEB128Mem(u32); @@ -471,7 +524,8 @@ pub const Parser = struct { } } - rr = Rr{ .@"local.get" = localidx }; + try self.module.immediates.append(localidx); + // rr = Rr{ .@"local.get" = localidx }; }, .@"local.set" => { const localidx = try self.readULEB128Mem(u32); @@ -500,7 +554,9 @@ pub const Parser = struct { } } - rr = Rr{ .@"local.set" = localidx }; + try self.module.immediates.append(localidx); + + // rr = Rr{ .@"local.set" = localidx }; }, .@"local.tee" => { const localidx = try self.readULEB128Mem(u32); @@ -529,37 +585,53 @@ pub const Parser = struct { } } - rr = Rr{ .@"local.tee" = localidx }; + try self.module.immediates.append(localidx); + // rr = Rr{ .@"local.tee" = localidx }; }, .@"memory.size" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; const memidx = try self.readByte(); if (memidx != 0) return error.MalformedMemoryReserved; - rr = Rr{ .@"memory.size" = memidx }; + try self.module.immediates.append(memidx); + // rr = Rr{ .@"memory.size" = memidx }; }, .@"memory.grow" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; const memidx = try self.readByte(); if (memidx != 0) return error.MalformedMemoryReserved; - rr = Rr{ .@"memory.grow" = memidx }; + try self.module.immediates.append(memidx); + // rr = Rr{ .@"memory.grow" = memidx }; }, .@"i32.const" => { const i32_const = try self.readILEB128Mem(i32); - rr = Rr{ .@"i32.const" = i32_const }; + + try self.module.immediates.append(@as(u32, @bitCast(i32_const))); + // rr = Rr{ .@"i32.const" = i32_const }; }, .@"i64.const" => { const i64_const = try self.readILEB128Mem(i64); - rr = Rr{ .@"i64.const" 
= i64_const }; + const u64_const = @as(u64, @bitCast(i64_const)); + + try self.module.immediates.append(@as(u32, @truncate(u64_const & 0xFFFFFFFF))); + try self.module.immediates.append(@as(u32, @truncate(u64_const >> 32))); + // rr = Rr{ .@"i64.const" = i64_const }; }, .@"f32.const" => { const float_const: f32 = @bitCast(try self.readU32()); - rr = Rr{ .@"f32.const" = float_const }; + + try self.module.immediates.append(@as(u32, @bitCast(float_const))); + // rr = Rr{ .@"f32.const" = float_const }; }, .@"f64.const" => { const float_const: f64 = @bitCast(try self.readU64()); - rr = Rr{ .@"f64.const" = float_const }; + const u64_float = @as(u64, @bitCast(float_const)); + + try self.module.immediates.append(@as(u32, @truncate(u64_float & 0xFFFF))); + try self.module.immediates.append(@as(u32, @truncate(u64_float >> 32))); + + // rr = Rr{ .@"f64.const" = float_const }; }, .@"i32.load" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -569,12 +641,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 32) return error.InvalidAlignment; - rr = Rr{ - .@"i32.load" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i32.load" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i64.load" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -583,12 +657,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 64) return error.InvalidAlignment; - rr = Rr{ - .@"i64.load" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i64.load" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"f32.load" => { if 
(self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -597,12 +673,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 32) return error.InvalidAlignment; - rr = Rr{ - .@"f32.load" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"f32.load" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"f64.load" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -611,12 +689,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 64) return error.InvalidAlignment; - rr = Rr{ - .@"f64.load" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"f64.load" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i32.load8_s" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -625,12 +705,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 8) return error.InvalidAlignment; - rr = Rr{ - .@"i32.load8_s" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i32.load8_s" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i32.load8_u" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -639,12 +721,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 8) return error.InvalidAlignment; - rr = Rr{ - .@"i32.load8_u" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try 
self.module.immediates.append(offset); + // rr = Rr{ + // .@"i32.load8_u" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i32.load16_s" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -653,12 +737,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 16) return error.InvalidAlignment; - rr = Rr{ - .@"i32.load16_s" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i32.load16_s" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i32.load16_u" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -667,12 +753,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 16) return error.InvalidAlignment; - rr = Rr{ - .@"i32.load16_u" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i32.load16_u" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i64.load8_s" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -681,12 +769,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 8) return error.InvalidAlignment; - rr = Rr{ - .@"i64.load8_s" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i64.load8_s" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i64.load8_u" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -695,12 +785,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) 
> 8) return error.InvalidAlignment; - rr = Rr{ - .@"i64.load8_u" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i64.load8_u" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i64.load16_s" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -709,12 +801,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 16) return error.InvalidAlignment; - rr = Rr{ - .@"i64.load16_s" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i64.load16_s" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i64.load16_u" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -723,12 +817,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 16) return error.InvalidAlignment; - rr = Rr{ - .@"i64.load16_u" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i64.load16_u" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i64.load32_s" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -737,12 +833,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 32) return error.InvalidAlignment; - rr = Rr{ - .@"i64.load32_s" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i64.load32_s" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i64.load32_u" => { if 
(self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -751,12 +849,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 32) return error.InvalidAlignment; - rr = Rr{ - .@"i64.load32_u" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i64.load32_u" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i32.store" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -765,12 +865,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 32) return error.InvalidAlignment; - rr = Rr{ - .@"i32.store" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i32.store" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i64.store" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -779,12 +881,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 64) return error.InvalidAlignment; - rr = Rr{ - .@"i64.store" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i64.store" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"f32.store" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -793,12 +897,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 32) return error.InvalidAlignment; - rr = Rr{ - .@"f32.store" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try 
self.module.immediates.append(offset); + // rr = Rr{ + // .@"f32.store" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"f64.store" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -807,12 +913,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 64) return error.InvalidAlignment; - rr = Rr{ - .@"f64.store" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"f64.store" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i32.store8" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -821,12 +929,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 8) return error.InvalidAlignment; - rr = Rr{ - .@"i32.store8" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i32.store8" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i32.store16" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -835,12 +945,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 16) return error.InvalidAlignment; - rr = Rr{ - .@"i32.store16" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i32.store16" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i64.store8" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -849,12 +961,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 8) return 
error.InvalidAlignment; - rr = Rr{ - .@"i64.store8" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i64.store8" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i64.store16" => { if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; @@ -863,12 +977,14 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 16) return error.InvalidAlignment; - rr = Rr{ - .@"i64.store16" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i64.store16" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, .@"i64.store32" => { const alignment = try self.readULEB128Mem(u32); @@ -876,149 +992,153 @@ pub const Parser = struct { if (try math.mul(u32, 8, try math.powi(u32, 2, alignment)) > 32) return error.InvalidAlignment; - rr = Rr{ - .@"i64.store32" = .{ - .alignment = alignment, - .offset = offset, - }, - }; + try self.module.immediates.append(alignment); + try self.module.immediates.append(offset); + // rr = Rr{ + // .@"i64.store32" = .{ + // .alignment = alignment, + // .offset = offset, + // }, + // }; }, - .@"i32.eqz" => rr = Rr.@"i32.eqz", - .@"i32.eq" => rr = Rr.@"i32.eq", - .@"i32.ne" => rr = Rr.@"i32.ne", - .@"i32.lt_s" => rr = Rr.@"i32.lt_s", - .@"i32.lt_u" => rr = Rr.@"i32.lt_u", - .@"i32.gt_s" => rr = Rr.@"i32.gt_s", - .@"i32.gt_u" => rr = Rr.@"i32.gt_u", - .@"i32.le_s" => rr = Rr.@"i32.le_s", - .@"i32.le_u" => rr = Rr.@"i32.le_u", - .@"i32.ge_s" => rr = Rr.@"i32.ge_s", - .@"i32.ge_u" => rr = Rr.@"i32.ge_u", - .@"i64.eqz" => rr = Rr.@"i64.eqz", - .@"i64.eq" => rr = Rr.@"i64.eq", - .@"i64.ne" => rr = Rr.@"i64.ne", - .@"i64.lt_s" => rr = Rr.@"i64.lt_s", - .@"i64.lt_u" => rr = Rr.@"i64.lt_u", - .@"i64.gt_s" => rr = 
Rr.@"i64.gt_s", - .@"i64.gt_u" => rr = Rr.@"i64.gt_u", - .@"i64.le_s" => rr = Rr.@"i64.le_s", - .@"i64.le_u" => rr = Rr.@"i64.le_u", - .@"i64.ge_s" => rr = Rr.@"i64.ge_s", - .@"i64.ge_u" => rr = Rr.@"i64.ge_u", - .@"f32.eq" => rr = Rr.@"f32.eq", - .@"f32.ne" => rr = Rr.@"f32.ne", - .@"f32.lt" => rr = Rr.@"f32.lt", - .@"f32.gt" => rr = Rr.@"f32.gt", - .@"f32.le" => rr = Rr.@"f32.le", - .@"f32.ge" => rr = Rr.@"f32.ge", - .@"f64.eq" => rr = Rr.@"f64.eq", - .@"f64.ne" => rr = Rr.@"f64.ne", - .@"f64.lt" => rr = Rr.@"f64.lt", - .@"f64.gt" => rr = Rr.@"f64.gt", - .@"f64.le" => rr = Rr.@"f64.le", - .@"f64.ge" => rr = Rr.@"f64.ge", - .@"i32.clz" => rr = Rr.@"i32.clz", - .@"i32.ctz" => rr = Rr.@"i32.ctz", - .@"i32.popcnt" => rr = Rr.@"i32.popcnt", - .@"i32.add" => rr = Rr.@"i32.add", - .@"i32.sub" => rr = Rr.@"i32.sub", - .@"i32.mul" => rr = Rr.@"i32.mul", - .@"i32.div_s" => rr = Rr.@"i32.div_s", - .@"i32.div_u" => rr = Rr.@"i32.div_u", - .@"i32.rem_s" => rr = Rr.@"i32.rem_s", - .@"i32.rem_u" => rr = Rr.@"i32.rem_u", - .@"i32.and" => rr = Rr.@"i32.and", - .@"i32.or" => rr = Rr.@"i32.or", - .@"i32.xor" => rr = Rr.@"i32.xor", - .@"i32.shl" => rr = Rr.@"i32.shl", - .@"i32.shr_s" => rr = Rr.@"i32.shr_s", - .@"i32.shr_u" => rr = Rr.@"i32.shr_u", - .@"i32.rotl" => rr = Rr.@"i32.rotl", - .@"i32.rotr" => rr = Rr.@"i32.rotr", - .@"i64.clz" => rr = Rr.@"i64.clz", - .@"i64.ctz" => rr = Rr.@"i64.ctz", - .@"i64.popcnt" => rr = Rr.@"i64.popcnt", - .@"i64.add" => rr = Rr.@"i64.add", - .@"i64.sub" => rr = Rr.@"i64.sub", - .@"i64.mul" => rr = Rr.@"i64.mul", - .@"i64.div_s" => rr = Rr.@"i64.div_s", - .@"i64.div_u" => rr = Rr.@"i64.div_u", - .@"i64.rem_s" => rr = Rr.@"i64.rem_s", - .@"i64.rem_u" => rr = Rr.@"i64.rem_u", - .@"i64.and" => rr = Rr.@"i64.and", - .@"i64.or" => rr = Rr.@"i64.or", - .@"i64.xor" => rr = Rr.@"i64.xor", - .@"i64.shl" => rr = Rr.@"i64.shl", - .@"i64.shr_s" => rr = Rr.@"i64.shr_s", - .@"i64.shr_u" => rr = Rr.@"i64.shr_u", - .@"i64.rotl" => rr = Rr.@"i64.rotl", - 
.@"i64.rotr" => rr = Rr.@"i64.rotr", - .@"f32.abs" => rr = Rr.@"f32.abs", - .@"f32.neg" => rr = Rr.@"f32.neg", - .@"f32.ceil" => rr = Rr.@"f32.ceil", - .@"f32.floor" => rr = Rr.@"f32.floor", - .@"f32.trunc" => rr = Rr.@"f32.trunc", - .@"f32.nearest" => rr = Rr.@"f32.nearest", - .@"f32.sqrt" => rr = Rr.@"f32.sqrt", - .@"f32.add" => rr = Rr.@"f32.add", - .@"f32.sub" => rr = Rr.@"f32.sub", - .@"f32.mul" => rr = Rr.@"f32.mul", - .@"f32.div" => rr = Rr.@"f32.div", - .@"f32.min" => rr = Rr.@"f32.min", - .@"f32.max" => rr = Rr.@"f32.max", - .@"f32.copysign" => rr = Rr.@"f32.copysign", - .@"f64.abs" => rr = Rr.@"f64.abs", - .@"f64.neg" => rr = Rr.@"f64.neg", - .@"f64.ceil" => rr = Rr.@"f64.ceil", - .@"f64.floor" => rr = Rr.@"f64.floor", - .@"f64.trunc" => rr = Rr.@"f64.trunc", - .@"f64.nearest" => rr = Rr.@"f64.nearest", - .@"f64.sqrt" => rr = Rr.@"f64.sqrt", - .@"f64.add" => rr = Rr.@"f64.add", - .@"f64.sub" => rr = Rr.@"f64.sub", - .@"f64.mul" => rr = Rr.@"f64.mul", - .@"f64.div" => rr = Rr.@"f64.div", - .@"f64.min" => rr = Rr.@"f64.min", - .@"f64.max" => rr = Rr.@"f64.max", - .@"f64.copysign" => rr = Rr.@"f64.copysign", - .@"i32.wrap_i64" => rr = Rr.@"i32.wrap_i64", - .@"i32.trunc_f32_s" => rr = Rr.@"i32.trunc_f32_s", - .@"i32.trunc_f32_u" => rr = Rr.@"i32.trunc_f32_u", - .@"i32.trunc_f64_s" => rr = Rr.@"i32.trunc_f64_s", - .@"i32.trunc_f64_u" => rr = Rr.@"i32.trunc_f64_u", - .@"i64.extend_i32_s" => rr = Rr.@"i64.extend_i32_s", - .@"i64.extend_i32_u" => rr = Rr.@"i64.extend_i32_u", - .@"i64.trunc_f32_s" => rr = Rr.@"i64.trunc_f32_s", - .@"i64.trunc_f32_u" => rr = Rr.@"i64.trunc_f32_u", - .@"i64.trunc_f64_s" => rr = Rr.@"i64.trunc_f64_s", - .@"i64.trunc_f64_u" => rr = Rr.@"i64.trunc_f64_u", - .@"f32.convert_i32_s" => rr = Rr.@"f32.convert_i32_s", - .@"f32.convert_i32_u" => rr = Rr.@"f32.convert_i32_u", - .@"f32.convert_i64_s" => rr = Rr.@"f32.convert_i64_s", - .@"f32.convert_i64_u" => rr = Rr.@"f32.convert_i64_u", - .@"f32.demote_f64" => rr = Rr.@"f32.demote_f64", - 
.@"f64.convert_i32_s" => rr = Rr.@"f64.convert_i32_s", - .@"f64.convert_i32_u" => rr = Rr.@"f64.convert_i32_u", - .@"f64.convert_i64_s" => rr = Rr.@"f64.convert_i64_s", - .@"f64.convert_i64_u" => rr = Rr.@"f64.convert_i64_u", - .@"f64.promote_f32" => rr = Rr.@"f64.promote_f32", - .@"i32.reinterpret_f32" => rr = Rr.@"i32.reinterpret_f32", - .@"i64.reinterpret_f64" => rr = Rr.@"i64.reinterpret_f64", - .@"f32.reinterpret_i32" => rr = Rr.@"f32.reinterpret_i32", - .@"f64.reinterpret_i64" => rr = Rr.@"f64.reinterpret_i64", - .@"i32.extend8_s" => rr = Rr.@"i32.extend8_s", - .@"i32.extend16_s" => rr = Rr.@"i32.extend16_s", - .@"i64.extend8_s" => rr = Rr.@"i64.extend8_s", - .@"i64.extend16_s" => rr = Rr.@"i64.extend16_s", - .@"i64.extend32_s" => rr = Rr.@"i64.extend32_s", + .@"i32.eqz" => {}, + .@"i32.eq" => {}, + .@"i32.ne" => {}, + .@"i32.lt_s" => {}, + .@"i32.lt_u" => {}, + .@"i32.gt_s" => {}, + .@"i32.gt_u" => {}, + .@"i32.le_s" => {}, + .@"i32.le_u" => {}, + .@"i32.ge_s" => {}, + .@"i32.ge_u" => {}, + .@"i64.eqz" => {}, + .@"i64.eq" => {}, + .@"i64.ne" => {}, + .@"i64.lt_s" => {}, + .@"i64.lt_u" => {}, + .@"i64.gt_s" => {}, + .@"i64.gt_u" => {}, + .@"i64.le_s" => {}, + .@"i64.le_u" => {}, + .@"i64.ge_s" => {}, + .@"i64.ge_u" => {}, + .@"f32.eq" => {}, + .@"f32.ne" => {}, + .@"f32.lt" => {}, + .@"f32.gt" => {}, + .@"f32.le" => {}, + .@"f32.ge" => {}, + .@"f64.eq" => {}, + .@"f64.ne" => {}, + .@"f64.lt" => {}, + .@"f64.gt" => {}, + .@"f64.le" => {}, + .@"f64.ge" => {}, + .@"i32.clz" => {}, + .@"i32.ctz" => {}, + .@"i32.popcnt" => {}, + .@"i32.add" => {}, + .@"i32.sub" => {}, + .@"i32.mul" => {}, + .@"i32.div_s" => {}, + .@"i32.div_u" => {}, + .@"i32.rem_s" => {}, + .@"i32.rem_u" => {}, + .@"i32.and" => {}, + .@"i32.or" => {}, + .@"i32.xor" => {}, + .@"i32.shl" => {}, + .@"i32.shr_s" => {}, + .@"i32.shr_u" => {}, + .@"i32.rotl" => {}, + .@"i32.rotr" => {}, + .@"i64.clz" => {}, + .@"i64.ctz" => {}, + .@"i64.popcnt" => {}, + .@"i64.add" => {}, + .@"i64.sub" => {}, + 
.@"i64.mul" => {}, + .@"i64.div_s" => {}, + .@"i64.div_u" => {}, + .@"i64.rem_s" => {}, + .@"i64.rem_u" => {}, + .@"i64.and" => {}, + .@"i64.or" => {}, + .@"i64.xor" => {}, + .@"i64.shl" => {}, + .@"i64.shr_s" => {}, + .@"i64.shr_u" => {}, + .@"i64.rotl" => {}, + .@"i64.rotr" => {}, + .@"f32.abs" => {}, + .@"f32.neg" => {}, + .@"f32.ceil" => {}, + .@"f32.floor" => {}, + .@"f32.trunc" => {}, + .@"f32.nearest" => {}, + .@"f32.sqrt" => {}, + .@"f32.add" => {}, + .@"f32.sub" => {}, + .@"f32.mul" => {}, + .@"f32.div" => {}, + .@"f32.min" => {}, + .@"f32.max" => {}, + .@"f32.copysign" => {}, + .@"f64.abs" => {}, + .@"f64.neg" => {}, + .@"f64.ceil" => {}, + .@"f64.floor" => {}, + .@"f64.trunc" => {}, + .@"f64.nearest" => {}, + .@"f64.sqrt" => {}, + .@"f64.add" => {}, + .@"f64.sub" => {}, + .@"f64.mul" => {}, + .@"f64.div" => {}, + .@"f64.min" => {}, + .@"f64.max" => {}, + .@"f64.copysign" => {}, + .@"i32.wrap_i64" => {}, + .@"i32.trunc_f32_s" => {}, + .@"i32.trunc_f32_u" => {}, + .@"i32.trunc_f64_s" => {}, + .@"i32.trunc_f64_u" => {}, + .@"i64.extend_i32_s" => {}, + .@"i64.extend_i32_u" => {}, + .@"i64.trunc_f32_s" => {}, + .@"i64.trunc_f32_u" => {}, + .@"i64.trunc_f64_s" => {}, + .@"i64.trunc_f64_u" => {}, + .@"f32.convert_i32_s" => {}, + .@"f32.convert_i32_u" => {}, + .@"f32.convert_i64_s" => {}, + .@"f32.convert_i64_u" => {}, + .@"f32.demote_f64" => {}, + .@"f64.convert_i32_s" => {}, + .@"f64.convert_i32_u" => {}, + .@"f64.convert_i64_s" => {}, + .@"f64.convert_i64_u" => {}, + .@"f64.promote_f32" => {}, + .@"i32.reinterpret_f32" => {}, + .@"i64.reinterpret_f64" => {}, + .@"f32.reinterpret_i32" => {}, + .@"f64.reinterpret_i64" => {}, + .@"i32.extend8_s" => {}, + .@"i32.extend16_s" => {}, + .@"i64.extend8_s" => {}, + .@"i64.extend16_s" => {}, + .@"i64.extend32_s" => {}, .@"ref.null" => { const rtype = try self.readULEB128Mem(i32); const reftype = std.meta.intToEnum(RefType, rtype) catch return error.MalformedRefType; try self.validator.validateRefNull(reftype); - rr = 
Rr{ .@"ref.null" = reftype }; + + try self.module.immediates.append(@as(u32, @intFromEnum(reftype))); + // rr = Rr{ .@"ref.null" = reftype }; }, - .@"ref.is_null" => rr = Rr.@"ref.is_null", + .@"ref.is_null" => {}, .@"ref.func" => { const funcidx = try self.readULEB128Mem(u32); if (funcidx >= self.module.functions.list.items.len) return error.ValidatorInvalidFunction; @@ -1038,7 +1158,8 @@ pub const Parser = struct { if (!in_references) return error.ValidatorUnreferencedFunction; } - rr = Rr{ .@"ref.func" = funcidx }; + try self.module.immediates.append(funcidx); + // rr = Rr{ .@"ref.func" = funcidx }; }, .misc => { const version = try self.readULEB128Mem(u32); @@ -1046,14 +1167,15 @@ pub const Parser = struct { try self.validator.validateMisc(misc_opcode); switch (misc_opcode) { - .@"i32.trunc_sat_f32_s" => rr = Rr{ .misc = MiscRr.@"i32.trunc_sat_f32_s" }, - .@"i32.trunc_sat_f32_u" => rr = Rr{ .misc = MiscRr.@"i32.trunc_sat_f32_u" }, - .@"i32.trunc_sat_f64_s" => rr = Rr{ .misc = MiscRr.@"i32.trunc_sat_f64_s" }, - .@"i32.trunc_sat_f64_u" => rr = Rr{ .misc = MiscRr.@"i32.trunc_sat_f64_u" }, - .@"i64.trunc_sat_f32_s" => rr = Rr{ .misc = MiscRr.@"i64.trunc_sat_f32_s" }, - .@"i64.trunc_sat_f32_u" => rr = Rr{ .misc = MiscRr.@"i64.trunc_sat_f32_u" }, - .@"i64.trunc_sat_f64_s" => rr = Rr{ .misc = MiscRr.@"i64.trunc_sat_f64_s" }, - .@"i64.trunc_sat_f64_u" => rr = Rr{ .misc = MiscRr.@"i64.trunc_sat_f64_u" }, + // FIXME: do I need to handle misc separately + .@"i32.trunc_sat_f32_s" => {}, + .@"i32.trunc_sat_f32_u" => {}, + .@"i32.trunc_sat_f64_s" => {}, + .@"i32.trunc_sat_f64_u" => {}, + .@"i64.trunc_sat_f32_s" => {}, + .@"i64.trunc_sat_f32_u" => {}, + .@"i64.trunc_sat_f64_s" => {}, + .@"i64.trunc_sat_f64_u" => {}, .@"memory.init" => { const dataidx = try self.readULEB128Mem(u32); const memidx = try self.readByte(); @@ -1062,14 +1184,17 @@ pub const Parser = struct { if (!(dataidx < data_count)) return error.InvalidDataIndex; if (self.module.memories.list.items.len != 1) 
return error.ValidatorUnknownMemory; - rr = Rr{ - .misc = MiscRr{ - .@"memory.init" = .{ - .dataidx = dataidx, - .memidx = memidx, - }, - }, - }; + + try self.module.immediates.append(dataidx); + try self.module.immediates.append(memidx); + // rr = Rr{ + // .misc = MiscRr{ + // .@"memory.init" = .{ + // .dataidx = dataidx, + // .memidx = memidx, + // }, + // }, + // }; }, .@"data.drop" => { const dataidx = try self.readULEB128Mem(u32); @@ -1077,24 +1202,28 @@ pub const Parser = struct { const data_count = self.module.data_count orelse return error.InstructionRequiresDataCountSection; if (!(dataidx < data_count)) return error.InvalidDataIndex; - rr = Rr{ .misc = MiscRr{ .@"data.drop" = dataidx } }; + try self.module.immediates.append(dataidx); + // rr = Rr{ .misc = MiscRr{ .@"data.drop" = dataidx } }; }, .@"memory.copy" => { const src_memidx = try self.readByte(); if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; - const dest_memidx = try self.readByte(); + const dst_memidx = try self.readByte(); if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; - rr = Rr{ .misc = MiscRr{ .@"memory.copy" = .{ - .src_memidx = src_memidx, - .dest_memidx = dest_memidx, - } } }; + try self.module.immediates.append(src_memidx); + try self.module.immediates.append(dst_memidx); + // rr = Rr{ .misc = MiscRr{ .@"memory.copy" = .{ + // .src_memidx = src_memidx, + // .dest_memidx = dest_memidx, + // } } }; }, .@"memory.fill" => { const memidx = try self.readByte(); if (self.module.memories.list.items.len != 1) return error.ValidatorUnknownMemory; - rr = Rr{ .misc = MiscRr{ .@"memory.fill" = memidx } }; + try self.module.immediates.append(memidx); + // rr = Rr{ .misc = MiscRr{ .@"memory.fill" = memidx } }; }, .@"table.init" => { const elemidx = try self.readULEB128Mem(u32); @@ -1105,31 +1234,36 @@ pub const Parser = struct { if (elemtype.reftype != tabletype.reftype) return error.MismatchedTypes; - rr = Rr{ .misc = MiscRr{ 
.@"table.init" = .{ - .elemidx = elemidx, - .tableidx = tableidx, - } } }; + try self.module.immediates.append(elemidx); + try self.module.immediates.append(tableidx); + // rr = Rr{ .misc = MiscRr{ .@"table.init" = .{ + // .elemidx = elemidx, + // .tableidx = tableidx, + // } } }; }, .@"elem.drop" => { const elemidx = try self.readULEB128Mem(u32); if (elemidx >= self.module.elements.list.items.len) return error.ValidatorInvalidElementIndex; - rr = Rr{ .misc = MiscRr{ .@"elem.drop" = .{ .elemidx = elemidx } } }; + try self.module.immediates.append(elemidx); + // rr = Rr{ .misc = MiscRr{ .@"elem.drop" = .{ .elemidx = elemidx } } }; }, .@"table.copy" => { - const dest_tableidx = try self.readULEB128Mem(u32); - const dest_tabletype = try self.module.tables.lookup(dest_tableidx); + const dst_tableidx = try self.readULEB128Mem(u32); + const dst_tabletype = try self.module.tables.lookup(dst_tableidx); const src_tableidx = try self.readULEB128Mem(u32); const src_tabletype = try self.module.tables.lookup(src_tableidx); - if (dest_tabletype.reftype != src_tabletype.reftype) return error.MismatchedTypes; + if (dst_tabletype.reftype != src_tabletype.reftype) return error.MismatchedTypes; - rr = Rr{ .misc = MiscRr{ .@"table.copy" = .{ - .dest_tableidx = dest_tableidx, - .src_tableidx = src_tableidx, - } } }; + try self.module.immediates.append(dst_tableidx); + try self.module.immediates.append(src_tableidx); + // rr = Rr{ .misc = MiscRr{ .@"table.copy" = .{ + // .dest_tableidx = dest_tableidx, + // .src_tableidx = src_tableidx, + // } } }; }, .@"table.grow" => { const tableidx = try self.readULEB128Mem(u32); @@ -1144,18 +1278,19 @@ pub const Parser = struct { _ = try self.validator.popOperandExpecting(Type{ .Known = reftype }); try self.validator.pushOperand(Type{ .Known = .I32 }); - - rr = Rr{ .misc = MiscRr{ .@"table.grow" = .{ - .tableidx = tableidx, - } } }; + try self.module.immediates.append(tableidx); + // rr = Rr{ .misc = MiscRr{ .@"table.grow" = .{ + // .tableidx = 
tableidx, + // } } }; }, .@"table.size" => { const tableidx = try self.readULEB128Mem(u32); if (tableidx >= self.module.tables.list.items.len) return error.ValidatorInvalidTableIndex; - rr = Rr{ .misc = MiscRr{ .@"table.size" = .{ - .tableidx = tableidx, - } } }; + try self.module.immediates.append(tableidx); + // rr = Rr{ .misc = MiscRr{ .@"table.size" = .{ + // .tableidx = tableidx, + // } } }; }, .@"table.fill" => { const tableidx = try self.readULEB128Mem(u32); @@ -1170,16 +1305,21 @@ pub const Parser = struct { _ = try self.validator.popOperandExpecting(Type{ .Known = reftype }); _ = try self.validator.popOperandExpecting(Type{ .Known = .I32 }); - rr = Rr{ .misc = MiscRr{ .@"table.fill" = .{ - .tableidx = tableidx, - } } }; + try self.module.immediates.append(tableidx); + // rr = Rr{ .misc = MiscRr{ .@"table.fill" = .{ + // .tableidx = tableidx, + // } } }; }, } }, } + // std.debug.print("immediates = {any}\nimmeoffset = {any}\n\n", .{ self.module.immediates.items, self.module.immediates_offset.items }); try self.validator.validate(instr); - return rr; + + try self.module.instructions.append(VirtualMachine.lookup[@intFromEnum(instr)]); + + return instr; } pub fn readULEB128Mem(self: *Parser, comptime T: type) !T { diff --git a/src/rr.zig b/src/rr.zig index cf62a9fc..072cea73 100644 --- a/src/rr.zig +++ b/src/rr.zig @@ -8,7 +8,7 @@ pub const RrOpcode = enum(u8) { loop = 0x03, @"if" = 0x04, @"else" = 0x05, - if_no_else = 0x06, + if_with_else = 0x06, end = 0x0b, br = 0x0c, br_if = 0x0d, @@ -206,13 +206,13 @@ pub const Rr = union(RrOpcode) { param_arity: u16, return_arity: u16, branch_target: u32, - else_ip: u32, }, @"else": void, - if_no_else: struct { + if_with_else: struct { param_arity: u16, return_arity: u16, branch_target: u32, + else_ip: u32, }, end: void, br: u32, @@ -222,7 +222,7 @@ pub const Rr = union(RrOpcode) { ln: u32, }, @"return": void, - call: usize, // u32? + call: u32, // u32? 
call_indirect: struct { typeidx: u32, tableidx: u32, diff --git a/src/store/elem.zig b/src/store/elem.zig index 071a22a2..71ed4331 100644 --- a/src/store/elem.zig +++ b/src/store/elem.zig @@ -3,7 +3,7 @@ const mem = std.mem; const RefType = @import("../valtype.zig").RefType; pub const Elem = struct { - @"type": RefType, + type: RefType, elem: []u32, alloc: mem.Allocator, dropped: bool = false, @@ -12,7 +12,7 @@ pub const Elem = struct { const elem = try alloc.alloc(u32, count); return Elem{ - .@"type" = reftype, + .type = reftype, .elem = elem, .alloc = alloc, }; diff --git a/src/store/memory.zig b/src/store/memory.zig index 0895e430..984fefe1 100644 --- a/src/store/memory.zig +++ b/src/store/memory.zig @@ -55,7 +55,7 @@ pub const Memory = struct { mem.copy(u8, self.data.items[address .. address + data.len], data); } - pub fn uncheckedFill(self: *Memory, dst_address: u32, n: u32, value: u8) void { + pub fn uncheckedFill(self: *Memory, dst_address: u32, n: u32, value: u8) void { @memset(self.data.items[dst_address .. dst_address + n], value); } @@ -76,7 +76,6 @@ pub const Memory = struct { const effective_address = @as(u33, offset) + @as(u33, address); if (effective_address + @sizeOf(T) - 1 >= self.data.items.len) return error.OutOfBoundsMemoryAccess; - switch (T) { u8, u16,