@@ -1,6 +1,7 @@
 use std::cell::{Cell, RefCell};
 use std::rc::Rc;
 use std::ffi::{c_int, c_long, c_void};
+use std::slice;
 
 use crate::asm::Label;
 use crate::backend::current::{Reg, ALLOC_REGS};
@@ -1348,11 +1349,17 @@ fn local_idx_to_ep_offset(iseq: IseqPtr, local_idx: usize) -> i32 {
     local_size_and_idx_to_ep_offset(local_size as usize, local_idx)
 }
 
-/// Convert the number of locals and a local index to an offset in the EP
+/// Convert the number of locals and a local index to an offset from the EP
 pub fn local_size_and_idx_to_ep_offset(local_size: usize, local_idx: usize) -> i32 {
     local_size as i32 - local_idx as i32 - 1 + VM_ENV_DATA_SIZE as i32
 }
 
+/// Convert the number of locals and a local index to an offset from the BP.
+/// We don't move the SP register after entry, so we often use SP as BP.
+pub fn local_size_and_idx_to_bp_offset(local_size: usize, local_idx: usize) -> i32 {
+    local_size_and_idx_to_ep_offset(local_size, local_idx) + 1
+}
+
 /// Convert ISEQ into High-level IR
 fn compile_iseq(iseq: IseqPtr) -> Option<Function> {
     let mut function = match iseq_to_hir(iseq) {
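An aside, not part of the commit: a standalone sketch of the offset arithmetic above, assuming VM_ENV_DATA_SIZE == 3 (CRuby's env-data slots pushed above the locals) and that EP sits one slot below BP at frame entry, which is where the `+ 1` comes from.

```rust
// Hedged sketch; VM_ENV_DATA_SIZE == 3 is an assumption for illustration.
const VM_ENV_DATA_SIZE: i32 = 3;

fn local_size_and_idx_to_ep_offset(local_size: usize, local_idx: usize) -> i32 {
    local_size as i32 - local_idx as i32 - 1 + VM_ENV_DATA_SIZE
}

fn local_size_and_idx_to_bp_offset(local_size: usize, local_idx: usize) -> i32 {
    // EP = BP - 1, so a BP-relative offset is one more than the EP-relative one.
    local_size_and_idx_to_ep_offset(local_size, local_idx) + 1
}

fn main() {
    // With 3 locals, local 0 is the deepest slot: *(ep - 5), i.e. *(bp - 6).
    assert_eq!(local_size_and_idx_to_ep_offset(3, 0), 5);
    assert_eq!(local_size_and_idx_to_bp_offset(3, 0), 6);
    // The last local (index 2) sits just below the env data: *(ep - 3).
    assert_eq!(local_size_and_idx_to_ep_offset(3, 2), 3);
}
```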
@@ -1448,26 +1455,41 @@ c_callable! {
     /// This function is expected to be called repeatedly when ZJIT fails to compile the stub.
     /// We should be able to compile most (if not all) function stubs by side-exiting at unsupported
     /// instructions, so this should be used primarily for cb.has_dropped_bytes() situations.
-    fn function_stub_hit(iseq_call_ptr: *const c_void, ec: EcPtr, sp: *mut VALUE) -> *const u8 {
+    fn function_stub_hit(iseq_call_ptr: *const c_void, cfp: CfpPtr, sp: *mut VALUE) -> *const u8 {
         with_vm_lock(src_loc!(), || {
-            // gen_push_frame() doesn't set PC and SP, so we need to set them before exit.
+            // gen_push_frame() doesn't set PC, so we need to set it before exit.
             // function_stub_hit_body() may allocate and call gc_validate_pc(), so we always set PC.
             let iseq_call = unsafe { Rc::from_raw(iseq_call_ptr as *const RefCell<IseqCall>) };
-            let cfp = unsafe { get_ec_cfp(ec) };
-            let pc = unsafe { rb_iseq_pc_at_idx(iseq_call.borrow().iseq, 0) }; // TODO: handle opt_pc once supported
+            let iseq = iseq_call.borrow().iseq;
+            let pc = unsafe { rb_iseq_pc_at_idx(iseq, 0) }; // TODO: handle opt_pc once supported
             unsafe { rb_set_cfp_pc(cfp, pc) };
-            unsafe { rb_set_cfp_sp(cfp, sp) };
+
+            // JIT-to-JIT calls don't set SP or nil-fill uninitialized (non-argument) locals.
+            // We need to set them if we side-exit from function_stub_hit.
+            fn spill_stack(iseq: IseqPtr, cfp: CfpPtr, sp: *mut VALUE) {
+                unsafe {
+                    // Set SP, which gen_push_frame() doesn't set
+                    rb_set_cfp_sp(cfp, sp);
+
+                    // Fill uninitialized (non-argument) locals with nil
+                    let local_size = get_iseq_body_local_table_size(iseq) as usize;
+                    let num_params = get_iseq_body_param_size(iseq) as usize;
+                    let base = sp.offset(-local_size_and_idx_to_bp_offset(local_size, num_params) as isize);
+                    slice::from_raw_parts_mut(base, local_size - num_params).fill(Qnil);
+                }
+            }
 
             // If we already know we can't compile the ISEQ, fail early without cb.mark_all_executable().
             // TODO: Alan thinks the payload status part of this check can happen without the VM lock, since the whole
             // code path can be made read-only. But you still need the check as is while holding the VM lock in any case.
             let cb = ZJITState::get_code_block();
-            let payload = get_or_create_iseq_payload(iseq_call.borrow().iseq);
+            let payload = get_or_create_iseq_payload(iseq);
             if cb.has_dropped_bytes() || payload.status == IseqStatus::CantCompile {
                 // We'll use this Rc again, so increment the ref count decremented by from_raw.
                 unsafe { Rc::increment_strong_count(iseq_call_ptr as *const RefCell<IseqCall>); }
 
                 // Exit to the interpreter
+                spill_stack(iseq, cfp, sp);
                 return ZJITState::get_exit_trampoline().raw_ptr(cb);
             }
 
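An aside, not part of the commit: spill_stack's nil-fill computes `base = sp - bp_offset(local_size, num_params)`, the slot of the first non-argument local, then fills `local_size - num_params` slots. A hedged sketch with a `Vec<u64>` standing in for the VM stack and `0x08` standing in for Qnil's value representation (both assumptions for illustration):

```rust
const VM_ENV_DATA_SIZE: i32 = 3; // assumption for illustration
const QNIL: u64 = 0x08;          // stand-in for Qnil's representation

fn bp_offset(local_size: usize, local_idx: usize) -> i32 {
    local_size as i32 - local_idx as i32 - 1 + VM_ENV_DATA_SIZE + 1
}

fn main() {
    let (local_size, num_params) = (4usize, 1usize);
    // Layout: [param0, local1, local2, local3, env0, env1, env2]; SP is one past env2.
    let mut stack: Vec<u64> = vec![7, 0, 0, 0, 1, 2, 3];
    let sp = stack.len();

    // The first non-argument local sits bp_offset slots below SP.
    let base = sp - bp_offset(local_size, num_params) as usize;
    stack[base..base + (local_size - num_params)].fill(QNIL);

    // Arguments and env data are untouched; the three uninitialized locals are nil.
    assert_eq!(stack, vec![7, QNIL, QNIL, QNIL, 1, 2, 3]);
}
```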
@@ -1477,6 +1499,7 @@ c_callable! {
                 code_ptr
             } else {
                 // Exit to the interpreter
+                spill_stack(iseq, cfp, sp);
                 ZJITState::get_exit_trampoline()
             };
             cb.mark_all_executable();
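An aside, not part of the commit: the Rc handling in function_stub_hit is the standard from_raw/increment_strong_count pairing. Rc::from_raw adopts the strong count the stub owns; on paths where the stub will be hit again, Rc::increment_strong_count restores that count before the adopted Rc drops. A minimal standalone demonstration:

```rust
use std::rc::Rc;

fn main() {
    // Raw pointer standing in for the one baked into the function stub.
    let raw: *const i32 = Rc::into_raw(Rc::new(42));

    // On entry, from_raw adopts the stub's strong count...
    let adopted = unsafe { Rc::from_raw(raw) };

    // ...so before returning on a path where the stub stays live,
    // restore the count that dropping `adopted` will consume.
    unsafe { Rc::increment_strong_count(raw) };
    assert_eq!(Rc::strong_count(&adopted), 2);
    drop(adopted); // the stub's count survives

    // Clean up the remaining count for this demo.
    drop(unsafe { Rc::from_raw(raw) });
}
```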
@@ -1540,7 +1563,7 @@ pub fn gen_function_stub_hit_trampoline(cb: &mut CodeBlock) -> Option<CodePtr> {
     const { assert!(ALLOC_REGS.len() % 2 == 0, "x86_64 would need to push one more if we push an odd number of regs"); }
 
     // Compile the stubbed ISEQ
-    let jump_addr = asm_ccall!(asm, function_stub_hit, SCRATCH_OPND, EC, SP);
+    let jump_addr = asm_ccall!(asm, function_stub_hit, SCRATCH_OPND, CFP, SP);
     asm.mov(SCRATCH_OPND, jump_addr);
 
     asm_comment!(asm, "restore argument registers");
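An aside, not part of the commit: the trampoline now passes CFP instead of EC because function_stub_hit no longer derives the frame via get_ec_cfp(ec); it receives the control frame it writes PC/SP through directly. A toy illustration of the shape of that change (the types here are stand-ins, not ZJIT's real definitions):

```rust
#[allow(dead_code)]
struct Cfp { pc: usize, sp: usize }
struct Ec { cfp: *mut Cfp }

// Before: receive EC and chase a pointer to reach the frame.
unsafe fn frame_via_ec(ec: *mut Ec) -> *mut Cfp {
    (*ec).cfp // the load the old get_ec_cfp(ec) call performed
}

// After: the trampoline passes CFP directly, so the extra load disappears.
unsafe fn frame_via_cfp(cfp: *mut Cfp) -> *mut Cfp {
    cfp
}

fn main() {
    let mut cfp = Cfp { pc: 0, sp: 0 };
    let mut ec = Ec { cfp: &mut cfp };
    unsafe { assert_eq!(frame_via_ec(&mut ec), frame_via_cfp(&mut cfp)); }
}
```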