From 48bb9e621a5aaaf10d7342b07919f3efda46bbbc Mon Sep 17 00:00:00 2001
From: FractalFir
Date: Wed, 26 Nov 2025 20:46:46 +0100
Subject: [PATCH 1/3] WIP 'opaque' pointers, no atomics yet

---
 crates/rustc_codegen_nvvm/src/abi.rs       |  7 +-
 crates/rustc_codegen_nvvm/src/allocator.rs |  6 +-
 crates/rustc_codegen_nvvm/src/builder.rs   | 87 +++++++++++++++++-----
 crates/rustc_codegen_nvvm/src/ty.rs        |  9 ++-
 4 files changed, 77 insertions(+), 32 deletions(-)

diff --git a/crates/rustc_codegen_nvvm/src/abi.rs b/crates/rustc_codegen_nvvm/src/abi.rs
index 60147222..534e4dee 100644
--- a/crates/rustc_codegen_nvvm/src/abi.rs
+++ b/crates/rustc_codegen_nvvm/src/abi.rs
@@ -378,12 +378,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
     }
 
     fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
-        unsafe {
-            llvm::LLVMPointerType(
-                self.llvm_type(cx),
-                cx.data_layout().instruction_address_space.0 as c_uint,
-            )
-        }
+        cx.type_i8p()
     }
 
     fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
diff --git a/crates/rustc_codegen_nvvm/src/allocator.rs b/crates/rustc_codegen_nvvm/src/allocator.rs
index 1403fde3..0798f1fb 100644
--- a/crates/rustc_codegen_nvvm/src/allocator.rs
+++ b/crates/rustc_codegen_nvvm/src/allocator.rs
@@ -140,14 +140,12 @@ pub(crate) unsafe fn codegen(
     unsafe { llvm::LLVMBuildRetVoid(llbuilder) };
     unsafe { llvm::LLVMDisposeBuilder(llbuilder) };
 
-    let ptr_ty = unsafe { llvm::LLVMPointerType(llvm::LLVMInt8TypeInContext(llcx), 0) };
-
     for used in &mut used {
-        *used = unsafe { llvm::LLVMConstBitCast(used, ptr_ty) };
+        *used = unsafe { llvm::LLVMConstBitCast(used, i8p) };
     }
 
     let section = c"llvm.metadata";
-    let array = unsafe { llvm::LLVMConstArray(ptr_ty, used.as_ptr(), used.len() as u32) };
+    let array = unsafe { llvm::LLVMConstArray(i8p, used.as_ptr(), used.len() as u32) };
     let g = unsafe {
         llvm::LLVMAddGlobal(llmod, llvm::LLVMTypeOf(array), c"llvm.used".as_ptr().cast())
     };
diff --git a/crates/rustc_codegen_nvvm/src/builder.rs b/crates/rustc_codegen_nvvm/src/builder.rs
index 08eee9ab..8a156e9b 100644
--- a/crates/rustc_codegen_nvvm/src/builder.rs
+++ b/crates/rustc_codegen_nvvm/src/builder.rs
@@ -232,6 +232,7 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             // Get the return type.
             let sig = llvm::LLVMGetElementType(self.val_ty(self.llfn()));
             let return_ty = llvm::LLVMGetReturnType(sig);
+;
             // Check if new_ty & return_ty are different pointers.
             // FIXME: get rid of this nonsense once we are past LLVM 7 and don't have
             // to suffer from typed pointers.
@@ -481,6 +482,10 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         // Cast to default addrspace if necessary
         let alloca_ty = llvm::LLVMTypeOf(alloca);
         let alloca_addrspace = llvm::LLVMGetPointerAddressSpace(alloca_ty);
+        let alloca = self.pointercast(
+            alloca,
+            self.type_i8p_ext(rustc_abi::AddressSpace(alloca_addrspace)),
+        );
         let dest_ty = self.cx().type_ptr();
         let dest_addrspace = llvm::LLVMGetPointerAddressSpace(dest_ty);
         if alloca_addrspace != dest_addrspace {
@@ -493,7 +498,9 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
     fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
         trace!("Load {ty:?} {:?}", ptr);
-        let ptr = self.pointercast(ptr, self.cx.type_ptr_to(ty));
+        let ptr = self.pointercast(ptr, unsafe {
+            llvm::LLVMPointerType(ty, llvm::LLVMGetPointerAddressSpace(self.val_ty(ptr)))
+        });
         unsafe {
             let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
             llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
@@ -503,7 +510,9 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
     fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value {
         trace!("Volatile load `{:?}`", ptr);
-        let ptr = self.pointercast(ptr, self.cx.type_ptr_to(ty));
+        let ptr = self.pointercast(ptr, unsafe {
+            llvm::LLVMPointerType(ty, llvm::LLVMGetPointerAddressSpace(self.val_ty(ptr)))
+        });
         unsafe {
             let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
             llvm::LLVMSetVolatile(load, llvm::True);
@@ -711,14 +720,21 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         align: Align,
         flags: MemFlags,
     ) -> &'ll Value {
+        assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
+        let ptr = self.check_store(val, ptr);
+        let address_space = unsafe { llvm::LLVMGetPointerAddressSpace(self.val_ty(ptr)) };
+        let store_pointer_ty = unsafe { llvm::LLVMPointerType(self.val_ty(val), address_space) };
+
+        let ptr = unsafe {
+            llvm::LLVMBuildBitCast(self.llbuilder, ptr, store_pointer_ty, c"NAME".as_ptr())
+        };
         trace!(
             "store_with_flags: {:?} into {:?} with align {:?}",
             val,
             ptr,
             align.bytes()
         );
-        assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
-        let ptr = self.check_store(val, ptr);
+
         unsafe {
             let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
             let align = if flags.contains(MemFlags::UNALIGNED) {
@@ -757,15 +773,20 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
     fn gep(&mut self, ty: &'ll Type, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
         trace!("gep: {ty:?} {:?} with indices {:?}", ptr, indices);
-        let ptr = self.pointercast(ptr, self.cx().type_ptr_to(ty));
+        let address_space = unsafe { llvm::LLVMGetPointerAddressSpace(self.val_ty(ptr)) };
+        let ptr = self.pointercast(ptr, unsafe { llvm::LLVMPointerType(ty, address_space) });
         unsafe {
-            llvm::LLVMBuildGEP2(
+            let res = llvm::LLVMBuildGEP2(
                 self.llbuilder,
                 ty,
                 ptr,
                 indices.as_ptr(),
                 indices.len() as c_uint,
                 UNNAMED,
+            );
+            self.pointercast(
+                ptr,
+                self.type_i8p_ext(rustc_abi::AddressSpace(address_space)),
             )
         }
     }
@@ -777,15 +798,20 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         indices: &[&'ll Value],
     ) -> &'ll Value {
         trace!("gep inbounds: {ty:?} {:?} with indices {:?}", ptr, indices);
-        let ptr = self.pointercast(ptr, self.cx().type_ptr_to(ty));
+        let address_space = unsafe { llvm::LLVMGetPointerAddressSpace(self.val_ty(ptr)) };
+        let ptr = self.pointercast(ptr, unsafe { llvm::LLVMPointerType(ty, address_space) });
         unsafe {
-            llvm::LLVMBuildInBoundsGEP2(
+            let res = llvm::LLVMBuildInBoundsGEP2(
                 self.llbuilder,
                 ty,
                 ptr,
                 indices.as_ptr(),
                 indices.len() as c_uint,
                 UNNAMED,
+            );
+            self.pointercast(
+                ptr,
+                self.type_i8p_ext(rustc_abi::AddressSpace(address_space)),
             )
         }
     }
@@ -1066,6 +1092,7 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
     fn insert_value(&mut self, agg_val: &'ll Value, mut elt: &'ll Value, idx: u64) -> &'ll Value {
         trace!("insert value {:?}, {:?}, {:?}", agg_val, elt, idx);
+
         assert_eq!(idx as c_uint as u64, idx);
 
         let elt_ty = self.cx.val_ty(elt);
@@ -1168,9 +1195,13 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 );
             }
         };
+        let tuple = self.type_struct(&[self.val_ty(src),self.type_i1()], false);
         let res = self.atomic_op(
             dst,
-            |builder, dst| {
+            tuple,
+            |builder, dst,ty| {
+                builder.abort();
+                return builder.const_undef(ty);
                 // We are in a supported address space - just use ordinary atomics
                 unsafe {
                     llvm::LLVMRustBuildAtomicCmpXchg(
@@ -1184,7 +1215,7 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                     )
                 }
             },
-            |builder, dst| {
+            |builder, dst,ty| {
                 // Local space is only accessible to the current thread.
                 // So, there are no synchronization issues, and we can emulate it using a simple load / compare / store.
                 let load: &'ll Value =
@@ -1221,8 +1252,13 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
         self.atomic_op(
             dst,
-            |builder, dst| {
+            self.val_ty(src),
+            |builder, dst,ty| {
                 // We are in a supported address space - just use ordinary atomics
+                let address_space = unsafe { llvm::LLVMGetPointerAddressSpace(builder.val_ty(dst)) };
+                let dst_ty = unsafe { llvm::LLVMPointerType(ty, address_space) };
+                let dst = builder.pointercast(dst,dst_ty);
+                let src = if matches!(op, AtomicRmwBinOp::AtomicXchg) {builder.pointercast(src,dst_ty)} else {src};
                 unsafe {
                     llvm::LLVMBuildAtomicRMW(
                         builder.llbuilder,
@@ -1234,7 +1270,7 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                     )
                 }
             },
-            |builder, dst| {
+            |builder, dst,ty| {
                 // Local space is only accessible to the current thread.
                 // So, there are no synchronization issues, and we can emulate it using a simple load / compare / store.
                let load: &'ll Value =
@@ -1314,6 +1350,15 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let args = self.check_call("call", llty, llfn, args);
 
         let mut call = unsafe {
+            let llfn = if self.cx.type_kind(llty) == TypeKind::Pointer {
+                self.pointercast(llfn, llty)
+            } else if self.cx.type_kind(self.val_ty(llfn)) == TypeKind::Pointer {
+                let target_fnptr = llvm::LLVMPointerType(llty, 0);
+                self.pointercast(llfn, target_fnptr)
+            } else {
+                llfn
+            };
+
             llvm::LLVMRustBuildCall(
                 self.llbuilder,
                 llfn,
@@ -1719,7 +1764,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         if !self.cx().sess().emit_lifetime_markers() {
             return;
         }
-
         self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]);
     }
 
@@ -1750,9 +1794,16 @@ impl<'ll, 'tcx, 'a> Builder<'a, 'll, 'tcx> {
     fn atomic_op(
         &mut self,
         dst: &'ll Value,
-        atomic_supported: impl FnOnce(&mut Builder<'a, 'll, 'tcx>, &'ll Value) -> &'ll Value,
-        emulate_local: impl FnOnce(&mut Builder<'a, 'll, 'tcx>, &'ll Value) -> &'ll Value,
+        ty:&'ll Type,
+        atomic_supported: impl FnOnce(&mut Builder<'a, 'll, 'tcx>, &'ll Value,&'ll Type) -> &'ll Value,
+        emulate_local: impl FnOnce(&mut Builder<'a, 'll, 'tcx>, &'ll Value,&'ll Type) -> &'ll Value,
     ) -> &'ll Value {
+
+        let emulate_local = |builder:&mut Self,_,_|{
+            // ATOMICS don't work with untyped pointers *YET*.
+            builder.abort();
+            builder.const_undef(ty)
+        };
         // (FractalFir) Atomics in CUDA have some limitations, and we have to work around them.
         // For example, they are restricted in what address space they operate on.
-        // CUDA has 4 address spaces(and a generic one, which is an union of all of those).
+        // CUDA has 4 address spaces (and a generic one, which is a union of all of those).
@@ -1803,7 +1854,7 @@ impl<'ll, 'tcx, 'a> Builder<'a, 'll, 'tcx> {
         let merge_bb = self.append_sibling_block("atomic_op_done");
         // Execute atomic op if supported, then jump to merge
         self.switch_to_block(supported_bb);
-        let supported_res = atomic_supported(self, dst);
+        let supported_res = atomic_supported(self, dst, ty);
         self.br(merge_bb);
         // Check if the pointer is in the thread space. If so, we can emulate it.
         self.switch_to_block(unsupported_bb);
@@ -1822,7 +1873,7 @@ impl<'ll, 'tcx, 'a> Builder<'a, 'll, 'tcx> {
         self.cond_br(isspacep_local, local_bb, atomic_ub_bb);
-        // The pointer is in the thread(local) space.
+        // The pointer is in the thread (local) space.
         self.switch_to_block(local_bb);
-        let local_res = emulate_local(self, dst);
+        let local_res = emulate_local(self, dst,ty);
         self.br(merge_bb);
         // The pointer is neither in the supported address space, nor the local space.
         // This is very likely UB. So, we trap here.
@@ -1830,7 +1881,7 @@ impl<'ll, 'tcx, 'a> Builder<'a, 'll, 'tcx> {
         self.switch_to_block(atomic_ub_bb);
         self.abort();
         self.unreachable();
-        // Atomic is impl has finished, and we can now switch to the merge_bb
+        // Atomic impl has finished, and we can now switch to the merge_bb
         self.switch_to_block(merge_bb);
         self.phi(
             self.val_ty(local_res),
diff --git a/crates/rustc_codegen_nvvm/src/ty.rs b/crates/rustc_codegen_nvvm/src/ty.rs
index b88a2dea..d3a1084f 100644
--- a/crates/rustc_codegen_nvvm/src/ty.rs
+++ b/crates/rustc_codegen_nvvm/src/ty.rs
@@ -108,7 +108,7 @@ impl<'ll> CodegenCx<'ll, '_> {
     pub(crate) fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type {
         unsafe { llvm::LLVMVectorType(ty, len as c_uint) }
     }
-
+    #[track_caller]
     pub(crate) fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
         assert_ne!(
             self.type_kind(ty),
@@ -116,10 +116,11 @@ impl<'ll> CodegenCx<'ll, '_> {
             "don't call ptr_to on function types, use ptr_to_llvm_type on FnAbi instead or explicitly specify an address space if it makes sense"
         );
-        unsafe { llvm::LLVMPointerType(ty, AddressSpace::ZERO.0) }
+        unsafe { self.type_ptr_to_ext(ty, AddressSpace::ZERO) }
     }
-
+    #[track_caller]
     pub(crate) fn type_ptr_to_ext(&self, ty: &'ll Type, address_space: AddressSpace) -> &'ll Type {
-        unsafe { llvm::LLVMPointerType(ty, address_space.0) }
+        //assert_eq!(ty,self.type_ix(8),"rustc_codegen_nvvm uses opaque pointers - specifying pointer type other than `i8` is not valid!");
+        unsafe { llvm::LLVMPointerType(self.type_ix(8), address_space.0) }
     }
 
     pub(crate) fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> {

From eed781ac3eb2682bc8818f3311ae366cb252c5b6 Mon Sep 17 00:00:00 2001
From: FractalFir
Date: Sat, 29 Nov 2025 18:20:55 +0100
Subject: [PATCH 2/3] Fully working atomics with opaque pointers

---
 crates/rustc_codegen_nvvm/src/builder.rs | 51 +++++++++++++-----------
 1 file changed, 28 insertions(+), 23 deletions(-)

diff --git a/crates/rustc_codegen_nvvm/src/builder.rs b/crates/rustc_codegen_nvvm/src/builder.rs
index 8a156e9b..2bc70a0b 100644
--- a/crates/rustc_codegen_nvvm/src/builder.rs
+++ b/crates/rustc_codegen_nvvm/src/builder.rs
@@ -232,7 +232,6 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             // Get the return type.
             let sig = llvm::LLVMGetElementType(self.val_ty(self.llfn()));
             let return_ty = llvm::LLVMGetReturnType(sig);
-;
             // Check if new_ty & return_ty are different pointers.
             // FIXME: get rid of this nonsense once we are past LLVM 7 and don't have
             // to suffer from typed pointers.
@@ -1195,13 +1194,15 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 );
             }
         };
-        let tuple = self.type_struct(&[self.val_ty(src),self.type_i1()], false);
+        let tuple = self.type_struct(&[self.val_ty(src), self.type_i1()], false);
         let res = self.atomic_op(
             dst,
             tuple,
-            |builder, dst,ty| {
-                builder.abort();
-                return builder.const_undef(ty);
+            |builder, dst, ty| {
+                let address_space =
+                    unsafe { llvm::LLVMGetPointerAddressSpace(builder.val_ty(dst)) };
+                let dst_ty = unsafe { llvm::LLVMPointerType(builder.val_ty(cmp), address_space) };
+                let dst = builder.pointercast(dst, dst_ty);
                 // We are in a supported address space - just use ordinary atomics
                 unsafe {
                     llvm::LLVMRustBuildAtomicCmpXchg(
@@ -1215,7 +1216,13 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                     )
                 }
             },
-            |builder, dst,ty| {
+            |builder, dst, ty| {
+                let dst = builder.pointercast(dst, unsafe {
+                    llvm::LLVMPointerType(
+                        builder.val_ty(cmp),
+                        llvm::LLVMGetPointerAddressSpace(builder.val_ty(dst)),
+                    )
+                });
                 // Local space is only accessible to the current thread.
                 // So, there are no synchronization issues, and we can emulate it using a simple load / compare / store.
                 let load: &'ll Value =
@@ -1253,12 +1260,12 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         self.atomic_op(
             dst,
             self.val_ty(src),
-            |builder, dst,ty| {
+            |builder, dst, ty| {
                 // We are in a supported address space - just use ordinary atomics
-                let address_space = unsafe { llvm::LLVMGetPointerAddressSpace(builder.val_ty(dst)) };
+                let address_space =
+                    unsafe { llvm::LLVMGetPointerAddressSpace(builder.val_ty(dst)) };
                 let dst_ty = unsafe { llvm::LLVMPointerType(ty, address_space) };
-                let dst = builder.pointercast(dst,dst_ty);
-                let src = if matches!(op, AtomicRmwBinOp::AtomicXchg) {builder.pointercast(src,dst_ty)} else {src};
+                let dst = builder.pointercast(dst, dst_ty);
                 unsafe {
                     llvm::LLVMBuildAtomicRMW(
                         builder.llbuilder,
@@ -1270,9 +1277,13 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                     )
                 }
             },
-            |builder, dst,ty| {
+            |builder, dst, ty| {
                 // Local space is only accessible to the current thread.
                 // So, there are no synchronization issues, and we can emulate it using a simple load / compare / store.
+                let dst = builder.pointercast(dst, unsafe {
+                    llvm::LLVMPointerType(ty, llvm::LLVMGetPointerAddressSpace(builder.val_ty(dst)))
+                });
+
                 let load: &'ll Value =
                     unsafe { llvm::LLVMBuildLoad(builder.llbuilder, dst, UNNAMED) };
                 let next_val = match op {
@@ -1352,13 +1363,13 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let mut call = unsafe {
             let llfn = if self.cx.type_kind(llty) == TypeKind::Pointer {
                 self.pointercast(llfn, llty)
-            } else if self.cx.type_kind(self.val_ty(llfn)) == TypeKind::Pointer { 
+            } else if self.cx.type_kind(self.val_ty(llfn)) == TypeKind::Pointer {
                 let target_fnptr = llvm::LLVMPointerType(llty, 0);
                 self.pointercast(llfn, target_fnptr)
             } else {
                 llfn
             };
-            
+
             llvm::LLVMRustBuildCall(
                 self.llbuilder,
                 llfn,
@@ -1794,16 +1805,10 @@ impl<'ll, 'tcx, 'a> Builder<'a, 'll, 'tcx> {
     fn atomic_op(
         &mut self,
         dst: &'ll Value,
-        ty:&'ll Type,
-        atomic_supported: impl FnOnce(&mut Builder<'a, 'll, 'tcx>, &'ll Value,&'ll Type) -> &'ll Value,
-        emulate_local: impl FnOnce(&mut Builder<'a, 'll, 'tcx>, &'ll Value,&'ll Type) -> &'ll Value,
+        ty: &'ll Type,
+        atomic_supported: impl FnOnce(&mut Builder<'a, 'll, 'tcx>, &'ll Value, &'ll Type) -> &'ll Value,
+        emulate_local: impl FnOnce(&mut Builder<'a, 'll, 'tcx>, &'ll Value, &'ll Type) -> &'ll Value,
     ) -> &'ll Value {
-
-        let emulate_local = |builder:&mut Self,_,_|{
-            // ATOMICS don't work with untyped pointers *YET*.
-            builder.abort();
-            builder.const_undef(ty)
-        };
         // (FractalFir) Atomics in CUDA have some limitations, and we have to work around them.
         // For example, they are restricted in what address space they operate on.
         // CUDA has 4 address spaces (and a generic one, which is a union of all of those).
@@ -1873,7 +1878,7 @@ impl<'ll, 'tcx, 'a> Builder<'a, 'll, 'tcx> {
         self.cond_br(isspacep_local, local_bb, atomic_ub_bb);
         // The pointer is in the thread (local) space.
         self.switch_to_block(local_bb);
-        let local_res = emulate_local(self, dst,ty);
+        let local_res = emulate_local(self, dst, ty);
         self.br(merge_bb);
         // The pointer is neither in the supported address space, nor the local space.
         // This is very likely UB. So, we trap here.

From ecb7537299083c82dade1ce53bb8e87f821b3c2e Mon Sep 17 00:00:00 2001
From: FractalFir
Date: Sat, 29 Nov 2025 19:20:24 +0100
Subject: [PATCH 3/3] clippy fixes + added an assertion to ensure typed pointers are *NOT* used

---
 crates/rustc_codegen_nvvm/src/abi.rs      | 13 ++----
 crates/rustc_codegen_nvvm/src/builder.rs  | 50 ++++++++---------------
 crates/rustc_codegen_nvvm/src/const_ty.rs |  5 +--
 crates/rustc_codegen_nvvm/src/ty.rs       | 44 +++++++++-----------
 4 files changed, 42 insertions(+), 70 deletions(-)

diff --git a/crates/rustc_codegen_nvvm/src/abi.rs b/crates/rustc_codegen_nvvm/src/abi.rs
index 534e4dee..21c5ca33 100644
--- a/crates/rustc_codegen_nvvm/src/abi.rs
+++ b/crates/rustc_codegen_nvvm/src/abi.rs
@@ -4,7 +4,7 @@ use libc::c_uint;
 use rustc_abi::BackendRepr::Scalar;
 use rustc_abi::CanonAbi;
 use rustc_abi::Size;
-use rustc_abi::{HasDataLayout, Primitive, Reg, RegKind};
+use rustc_abi::{Primitive, Reg, RegKind};
 use rustc_codegen_ssa::mir::operand::OperandRef;
 use rustc_codegen_ssa::mir::operand::OperandValue;
 use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
@@ -305,7 +305,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             PassMode::Cast { cast, .. } => cast.llvm_type(cx),
             PassMode::Indirect { .. } => {
                 idx += 1;
-                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+                llargument_tys.push(cx.type_i8p());
                 cx.type_void()
             }
         };
@@ -353,7 +353,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 attrs: _,
                 meta_attrs: None,
                 on_stack: _,
-            } => cx.type_ptr_to(arg.memory_ty(cx)),
+            } => cx.type_i8p(),
         };
         let (new, changed) = get_transformed_type(cx, llarg_ty);
         if changed {
@@ -564,7 +564,6 @@ impl<'tcx> AbiBuilderMethods for Builder<'_, '_, 'tcx> {
 }
 
 pub(crate) trait ArgAbiExt<'ll, 'tcx> {
-    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
     fn store(
         &self,
         bx: &mut Builder<'_, 'll, 'tcx>,
@@ -580,12 +579,6 @@ pub(crate) trait ArgAbiExt<'ll, 'tcx> {
 }
 
 impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
-    /// Gets the LLVM type for a place of the original Rust type of
-    /// this argument/return, i.e., the result of `type_of::type_of`.
-    fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
-        self.layout.llvm_type(cx)
-    }
-
     /// Stores a direct/indirect value described by this ArgAbi into a
     /// place for the original Rust type of this argument/return.
     /// Can be used for both storing formal arguments into Rust variables
diff --git a/crates/rustc_codegen_nvvm/src/builder.rs b/crates/rustc_codegen_nvvm/src/builder.rs
index 2bc70a0b..a0940704 100644
--- a/crates/rustc_codegen_nvvm/src/builder.rs
+++ b/crates/rustc_codegen_nvvm/src/builder.rs
@@ -4,7 +4,7 @@ use std::ptr;
 
 use libc::{c_char, c_uint};
 use rustc_abi as abi;
-use rustc_abi::{AddressSpace, Align, HasDataLayout, Size, TargetDataLayout, WrappingRange};
+use rustc_abi::{Align, HasDataLayout, Size, TargetDataLayout, WrappingRange};
 use rustc_codegen_ssa::MemFlags;
 use rustc_codegen_ssa::common::{AtomicRmwBinOp, IntPredicate, RealPredicate, TypeKind};
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
@@ -720,7 +720,7 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         flags: MemFlags,
     ) -> &'ll Value {
         assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
-        let ptr = self.check_store(val, ptr);
+
         let address_space = unsafe { llvm::LLVMGetPointerAddressSpace(self.val_ty(ptr)) };
         let store_pointer_ty = unsafe { llvm::LLVMPointerType(self.val_ty(val), address_space) };
 
@@ -784,7 +784,7 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 UNNAMED,
             );
             self.pointercast(
-                ptr,
+                res,
                 self.type_i8p_ext(rustc_abi::AddressSpace(address_space)),
             )
         }
@@ -809,7 +809,7 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 UNNAMED,
             );
             self.pointercast(
-                ptr,
+                res,
                 self.type_i8p_ext(rustc_abi::AddressSpace(address_space)),
             )
         }
@@ -917,20 +917,20 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
     fn bitcast(&mut self, mut val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
         trace!("Bitcast `{:?}` to ty `{:?}`", val, dest_ty);
-        unsafe {
-            let ty = self.val_ty(val);
-            let kind = llvm::LLVMRustGetTypeKind(ty);
-            if kind == llvm::TypeKind::Pointer {
-                let element = self.element_type(ty);
-                let addrspace = llvm::LLVMGetPointerAddressSpace(ty);
-                let new_ty = self.type_ptr_to_ext(element, AddressSpace::ZERO);
-                if addrspace != 0 {
-                    trace!("injecting addrspace cast for `{:?}` to `{:?}`", ty, new_ty);
-                    val = llvm::LLVMBuildAddrSpaceCast(self.llbuilder, val, new_ty, UNNAMED);
-                }
+
+        let ty = self.val_ty(val);
+        let kind = unsafe { llvm::LLVMRustGetTypeKind(ty) };
+
+        if kind == llvm::TypeKind::Pointer {
+            let element = self.element_type(ty);
+            let addrspace = unsafe { llvm::LLVMGetPointerAddressSpace(ty) };
+            let new_ty = unsafe { llvm::LLVMPointerType(element, 0) };
+            if addrspace != 0 {
+                trace!("injecting addrspace cast for `{:?}` to `{:?}`", ty, new_ty);
+                val = unsafe { llvm::LLVMBuildAddrSpaceCast(self.llbuilder, val, new_ty, UNNAMED) };
             }
-            llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED)
         }
+        unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) }
     }
 
     fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value {
@@ -1198,7 +1198,7 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let res = self.atomic_op(
             dst,
             tuple,
-            |builder, dst, ty| {
+            |builder, dst, _| {
                 let address_space =
                     unsafe { llvm::LLVMGetPointerAddressSpace(builder.val_ty(dst)) };
                 let dst_ty = unsafe { llvm::LLVMPointerType(builder.val_ty(cmp), address_space) };
@@ -1216,7 +1216,7 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 )
             },
-            |builder, dst, ty| {
+            |builder, dst, _| {
                 let dst = builder.pointercast(dst, unsafe {
                     llvm::LLVMPointerType(
                         builder.val_ty(cmp),
@@ -1698,20 +1698,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
 
     fn noundef_metadata(&mut self, _load: &'ll Value) {}
 
-    fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
-        let dest_ptr_ty = self.cx.val_ty(ptr);
-        let stored_ty = self.cx.val_ty(val);
-        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
-
-        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
-
-        if dest_ptr_ty == stored_ptr_ty {
-            ptr
-        } else {
-            self.bitcast(ptr, stored_ptr_ty)
-        }
-    }
-
     fn check_call<'b>(
         &mut self,
         typ: &str,
diff --git a/crates/rustc_codegen_nvvm/src/const_ty.rs b/crates/rustc_codegen_nvvm/src/const_ty.rs
index 10b28191..92734105 100644
--- a/crates/rustc_codegen_nvvm/src/const_ty.rs
+++ b/crates/rustc_codegen_nvvm/src/const_ty.rs
@@ -1,5 +1,5 @@
 use crate::llvm::{self, Bool, False, True, Type, Value};
-use crate::{consts::const_alloc_to_llvm, context::CodegenCx, ty::LayoutLlvmExt};
+use crate::{consts::const_alloc_to_llvm, context::CodegenCx};
 use libc::c_uint;
 use rustc_abi as abi;
 use rustc_abi::Primitive::Pointer;
@@ -11,7 +11,6 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_hashes::Hash128;
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
-use rustc_middle::ty::layout::LayoutOf;
 use tracing::trace;
 
 impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
@@ -99,7 +98,7 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> {
             g
         });
         let len = s.len();
-        let ty = self.type_ptr_to(self.layout_of(self.tcx.types.str_).llvm_type(self));
+        let ty = self.type_i8p();
         let cs = unsafe { llvm::LLVMConstPointerCast(val, ty) };
         (cs, self.const_usize(len as u64))
     }
diff --git a/crates/rustc_codegen_nvvm/src/ty.rs b/crates/rustc_codegen_nvvm/src/ty.rs
index d3a1084f..8a31c60f 100644
--- a/crates/rustc_codegen_nvvm/src/ty.rs
+++ b/crates/rustc_codegen_nvvm/src/ty.rs
@@ -55,9 +55,7 @@ impl Type {
 
 impl<'ll> CodegenCx<'ll, '_> {
     pub(crate) fn voidp(&self) -> &'ll Type {
-        // llvm uses i8* for void ptrs, void* is invalid
-        let i8_ty = self.type_i8();
-        self.type_ptr_to_ext(i8_ty, AddressSpace::ZERO)
+        self.type_i8p()
     }
 
     pub(crate) fn type_named_struct(&self, name: &str) -> &'ll Type {
@@ -116,11 +114,15 @@ impl<'ll> CodegenCx<'ll, '_> {
             "don't call ptr_to on function types, use ptr_to_llvm_type on FnAbi instead or explicitly specify an address space if it makes sense"
         );
-        unsafe { self.type_ptr_to_ext(ty, AddressSpace::ZERO) }
+        self.type_ptr_to_ext(ty, AddressSpace::ZERO)
     }
     #[track_caller]
     pub(crate) fn type_ptr_to_ext(&self, ty: &'ll Type, address_space: AddressSpace) -> &'ll Type {
-        //assert_eq!(ty,self.type_ix(8),"rustc_codegen_nvvm uses opaque pointers - specifying pointer type other than `i8` is not valid!");
+        assert_eq!(
+            ty,
+            self.type_ix(8),
+            "rustc_codegen_nvvm uses opaque pointers - specifying pointer type other than `i8` is not valid!"
+        );
         unsafe { llvm::LLVMPointerType(self.type_ix(8), address_space.0) }
     }
 
@@ -134,11 +136,6 @@ impl<'ll> CodegenCx<'ll, '_> {
         }
     }
 
-    pub(crate) fn type_pointee_for_align(&self, align: Align) -> &'ll Type {
-        let ity = Integer::approximate_align(self, align);
-        self.type_from_integer(ity)
-    }
-
     /// Return a LLVM type that has at most the required alignment,
     /// and exactly the required size, as a best-effort padding array.
     pub(crate) fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type {
@@ -220,7 +217,11 @@ impl<'ll, 'tcx> BaseTypeCodegenMethods for CodegenCx<'ll, 'tcx> {
     }
 
     fn element_type(&self, ty: &'ll Type) -> &'ll Type {
-        unsafe { llvm::LLVMGetElementType(ty) }
+        let res = unsafe { llvm::LLVMGetElementType(ty) };
+        if self.type_kind(ty) == TypeKind::Pointer {
+            //assert_eq!(self.type_kind(res),TypeKind::Function, "{ty:?} is a pointer, and getting its pointee is a nonsense operation.");
+        }
+        res
     }
 
     fn vector_length(&self, ty: &'ll Type) -> usize {
@@ -341,12 +342,8 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         }
 
         let llty = match *self.ty.kind() {
-            ty::Ref(_, ty, _) | ty::RawPtr(ty, _) => {
-                cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
-            }
-            ty::Adt(def, _) if def.is_box() => {
-                cx.type_ptr_to(cx.layout_of(self.ty.expect_boxed_ty()).llvm_type(cx))
-            }
+            ty::Ref(_, _, _) | ty::RawPtr(_, _) => cx.type_i8p(),
+            ty::Adt(def, _) if def.is_box() => cx.type_i8p(),
             ty::FnPtr(sig, hdr) => {
                 cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig.with(hdr), ty::List::empty()))
             }
@@ -427,17 +424,14 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
             Float(f) => cx.type_from_float(f),
             Pointer(address_space) => {
-                // If we know the alignment, pick something better than i8.
+                // With opaque pointers, the pointee is always i8; pointee info only decides the address space.
-                let (pointee, address_space) = if let Some(PointeeInfo {
-                    safe: Some(_),
-                    align,
-                    ..
-                }) = self.pointee_info_at(cx, Size::ZERO)
+                let address_space = if let Some(PointeeInfo { safe: Some(_), .. }) =
+                    self.pointee_info_at(cx, Size::ZERO)
                 {
-                    (cx.type_pointee_for_align(align), address_space)
+                    address_space
                 } else {
-                    (cx.type_i8(), AddressSpace::ZERO)
+                    AddressSpace::ZERO
                 };
-                cx.type_ptr_to_ext(pointee, address_space)
+                cx.type_i8p_ext(address_space)
             }
         }
    }
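
The same discipline repeats through all three patches: every SSA pointer is kept as an "opaque" `i8*` in its original address space (patch 3's assertion in `type_ptr_to_ext` enforces this), and each operation that actually needs a pointee type (load, store, GEP, the atomic helpers) casts in, emits the typed LLVM 7 instruction, and casts any pointer result back to `i8*`. A condensed sketch of that cast-in/cast-out shape, using the same `llvm` wrappers and builder methods the patches call; `typed_gep` is a hypothetical name for illustration, not a helper added by this series:

    // Sketch only: condenses the post-patch-3 bodies of `gep`/`inbounds_gep`.
    fn typed_gep(&mut self, ty: &'ll Type, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
        // The incoming pointer is an opaque i8*; recover its address space.
        let address_space = unsafe { llvm::LLVMGetPointerAddressSpace(self.val_ty(ptr)) };
        // Cast in: LLVM 7's typed GEP needs a pointer whose pointee matches `ty`.
        let typed_ptr = self.pointercast(ptr, unsafe { llvm::LLVMPointerType(ty, address_space) });
        let res = unsafe {
            llvm::LLVMBuildGEP2(
                self.llbuilder,
                ty,
                typed_ptr,
                indices.as_ptr(),
                indices.len() as c_uint,
                UNNAMED,
            )
        };
        // Cast out: erase the pointee type again so the rest of codegen only
        // ever sees i8* (patch 3's fix is precisely to cast `res`, not `ptr`).
        self.pointercast(res, self.type_i8p_ext(rustc_abi::AddressSpace(address_space)))
    }

Confining the typed pointer's lifetime to the single instruction that needs it is what lets the `type_ptr_to_ext` assertion hold everywhere else in the backend.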