diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs index 93202483eed81..1983941c657f6 100644 --- a/compiler/rustc_codegen_gcc/src/type_of.rs +++ b/compiler/rustc_codegen_gcc/src/type_of.rs @@ -1,6 +1,6 @@ use std::fmt::Write; -use gccjit::{Struct, Type}; +use gccjit::{RValue, Struct, Type}; use rustc_abi as abi; use rustc_abi::Primitive::*; use rustc_abi::{ @@ -373,7 +373,11 @@ impl<'gcc, 'tcx> LayoutTypeCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { unimplemented!(); } - fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> { + fn fn_decl_backend_type( + &self, + fn_abi: &FnAbi<'tcx, Ty<'tcx>>, + _fn_ptr: RValue<'gcc>, + ) -> Type<'gcc> { // FIXME(antoyo): Should we do something with `FnAbiGcc::fn_attributes`? let FnAbiGcc { return_type, arguments_type, is_c_variadic, .. } = fn_abi.gcc_type(self); self.context.new_function_pointer_type(None, return_type, &arguments_type, is_c_variadic) diff --git a/compiler/rustc_codegen_llvm/messages.ftl b/compiler/rustc_codegen_llvm/messages.ftl index ce9a51b539d83..fe996c219fb0e 100644 --- a/compiler/rustc_codegen_llvm/messages.ftl +++ b/compiler/rustc_codegen_llvm/messages.ftl @@ -9,6 +9,13 @@ codegen_llvm_from_llvm_diag = {$message} codegen_llvm_from_llvm_optimization_diag = {$filename}:{$line}:{$column} {$pass_name} ({$kind}): {$message} +codegen_llvm_intrinsic_signature_mismatch = + intrinsic signature mismatch for `{$name}`: expected signature `{$llvm_fn_ty}`, found `{$rust_fn_ty}` + +codegen_llvm_intrinsic_wrong_arch = + intrinsic `{$name}` cannot be used with target arch `{$target_arch}` + + codegen_llvm_load_bitcode = failed to load bitcode of module "{$name}" codegen_llvm_load_bitcode_with_llvm_err = failed to load bitcode of module "{$name}": {$llvm_err} diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 680ad98593e7e..0368144678944 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -1,4 +1,5 @@ -use std::cmp; +use std::borrow::Borrow; +use std::{cmp, iter}; use libc::c_uint; use rustc_abi::{ @@ -6,6 +7,7 @@ use rustc_abi::{ X86Call, }; use rustc_codegen_ssa::MemFlags; +use rustc_codegen_ssa::common::TypeKind; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue}; use rustc_codegen_ssa::traits::*; @@ -21,7 +23,7 @@ use smallvec::SmallVec; use crate::attributes::{self, llfn_attrs_from_instance}; use crate::builder::Builder; -use crate::context::CodegenCx; +use crate::context::{CodegenCx, GenericCx, SCx}; use crate::llvm::{self, Attribute, AttributePlace, Type, Value}; use crate::llvm_util; use crate::type_of::LayoutLlvmExt; @@ -301,8 +303,46 @@ impl<'ll, 'tcx> ArgAbiBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { } } +pub(crate) enum FunctionSignature<'ll> { + /// This is an LLVM intrinsic, the signature is obtained directly from LLVM, and **may not match the Rust signature** + LLVMSignature(llvm::Intrinsic, &'ll Type), + /// This is an LLVM intrinsic, but the signature is just the Rust signature. + /// FIXME: this should ideally not exist, we should be using the LLVM signature for all LLVM intrinsics + RustSignature(llvm::Intrinsic, &'ll Type), + /// The name starts with `llvm.`, but can't obtain the intrinsic ID. 
+    /// It may be invalid, or it may be upgradable via LLVM's `AutoUpgrade`.
+    MaybeInvalid(&'ll Type),
+    /// Just the Rust signature
+    NotIntrinsic(&'ll Type),
+}
+
+impl<'ll> FunctionSignature<'ll> {
+    pub(crate) fn fn_ty(&self) -> &'ll Type {
+        match self {
+            FunctionSignature::LLVMSignature(_, fn_ty)
+            | FunctionSignature::RustSignature(_, fn_ty)
+            | FunctionSignature::MaybeInvalid(fn_ty)
+            | FunctionSignature::NotIntrinsic(fn_ty) => fn_ty,
+        }
+    }
+
+    pub(crate) fn intrinsic(&self) -> Option<llvm::Intrinsic> {
+        match self {
+            FunctionSignature::RustSignature(intrinsic, _)
+            | FunctionSignature::LLVMSignature(intrinsic, _) => Some(*intrinsic),
+            _ => None,
+        }
+    }
+}
+
 pub(crate) trait FnAbiLlvmExt<'ll, 'tcx> {
-    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
+    fn llvm_return_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
+    fn llvm_argument_types(&self, cx: &CodegenCx<'ll, 'tcx>) -> Vec<&'ll Type>;
+    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>, name: &[u8]) -> FunctionSignature<'ll>;
+    /// The LLVM function type for the normal Rust signature of this function.
+    fn rust_signature(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
+    /// **If this function is an LLVM intrinsic**, checks whether the provided LLVM signature matches this Rust signature.
+    fn verify_intrinsic_signature(&self, cx: &CodegenCx<'ll, 'tcx>, llvm_ty: &'ll Type) -> bool;
+
     fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
     fn llvm_cconv(&self, cx: &CodegenCx<'ll, 'tcx>) -> llvm::CallConv;
@@ -315,30 +355,83 @@ pub(crate) trait FnAbiLlvmExt<'ll, 'tcx> {
     );
     /// Apply attributes to a function call.
-    fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value);
+    fn apply_attrs_callsite(
+        &self,
+        bx: &mut Builder<'_, 'll, 'tcx>,
+        callsite: &'ll Value,
+        llfn: &'ll Value,
+    );
+}
+
+impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> {
+    pub(crate) fn equate_ty(&self, rust_ty: &'ll Type, llvm_ty: &'ll Type) -> bool {
+        if rust_ty == llvm_ty {
+            return true;
+        }
+
+        match self.type_kind(llvm_ty) {
+            TypeKind::BFloat => rust_ty == self.type_i16(),
+
+            // Some LLVM intrinsics return **non-packed** structs, which cannot be mimicked from
+            // Rust because of automatic field alignment in non-packed structs (packed Rust structs
+            // are represented in LLVM as packed structs, so they will not match either).
+            TypeKind::Struct if self.type_kind(rust_ty) == TypeKind::Struct => {
+                let rust_element_tys = self.struct_element_types(rust_ty);
+                let llvm_element_tys = self.struct_element_types(llvm_ty);
+
+                if rust_element_tys.len() != llvm_element_tys.len() {
+                    return false;
+                }
+
+                iter::zip(rust_element_tys, llvm_element_tys).all(
+                    |(rust_element_ty, llvm_element_ty)| {
+                        self.equate_ty(rust_element_ty, llvm_element_ty)
+                    },
+                )
+            }
+            TypeKind::Vector => {
+                let element_count = self.vector_length(llvm_ty) as u64;
+                let llvm_element_ty = self.element_type(llvm_ty);
+
+                if llvm_element_ty == self.type_bf16() {
+                    rust_ty == self.type_vector(self.type_i16(), element_count)
+                } else if llvm_element_ty == self.type_i1() {
+                    let int_width = element_count.next_power_of_two().max(8);
+                    rust_ty == self.type_ix(int_width)
+                } else {
+                    false
+                }
+            }
+            _ => false,
+        }
+    }
 }
 
 impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
-    fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
+    fn llvm_return_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
+        match &self.ret.mode {
+            PassMode::Ignore => cx.type_void(),
+            PassMode::Direct(_) | PassMode::Pair(..)
=> self.ret.layout.immediate_llvm_type(cx), + PassMode::Cast { cast, pad_i32: _ } => cast.llvm_type(cx), + PassMode::Indirect { .. } => cx.type_void(), + } + } + + fn llvm_argument_types(&self, cx: &CodegenCx<'ll, 'tcx>) -> Vec<&'ll Type> { + let indirect_return = matches!(self.ret.mode, PassMode::Indirect { .. }); + // Ignore "extra" args from the call site for C variadic functions. // Only the "fixed" args are part of the LLVM function signature. let args = if self.c_variadic { &self.args[..self.fixed_count as usize] } else { &self.args }; // This capacity calculation is approximate. - let mut llargument_tys = Vec::with_capacity( - self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 }, - ); + let mut llargument_tys = + Vec::with_capacity(args.len() + if indirect_return { 1 } else { 0 }); - let llreturn_ty = match &self.ret.mode { - PassMode::Ignore => cx.type_void(), - PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx), - PassMode::Cast { cast, pad_i32: _ } => cast.llvm_type(cx), - PassMode::Indirect { .. } => { - llargument_tys.push(cx.type_ptr()); - cx.type_void() - } - }; + if indirect_return { + llargument_tys.push(cx.type_ptr()); + } for arg in args { // Note that the exact number of arguments pushed here is carefully synchronized with @@ -385,10 +478,56 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { llargument_tys.push(llarg_ty); } + llargument_tys + } + + fn verify_intrinsic_signature(&self, cx: &CodegenCx<'ll, 'tcx>, llvm_fn_ty: &'ll Type) -> bool { + let rust_return_ty = self.llvm_return_type(cx); + let rust_argument_tys = self.llvm_argument_types(cx); + + let llvm_return_ty = cx.get_return_type(llvm_fn_ty); + let llvm_argument_tys = cx.func_params_types(llvm_fn_ty); + let llvm_is_variadic = cx.func_is_variadic(llvm_fn_ty); + + if self.c_variadic != llvm_is_variadic || rust_argument_tys.len() != llvm_argument_tys.len() + { + return false; + } + + iter::once((rust_return_ty, llvm_return_ty)) + .chain(iter::zip(rust_argument_tys, llvm_argument_tys)) + .all(|(rust_ty, llvm_ty)| cx.equate_ty(rust_ty, llvm_ty)) + } + + fn rust_signature(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { + let return_ty = self.llvm_return_type(cx); + let argument_tys = self.llvm_argument_types(cx); + if self.c_variadic { - cx.type_variadic_func(&llargument_tys, llreturn_ty) + cx.type_variadic_func(&argument_tys, return_ty) } else { - cx.type_func(&llargument_tys, llreturn_ty) + cx.type_func(&argument_tys, return_ty) + } + } + + fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>, name: &[u8]) -> FunctionSignature<'ll> { + if name.starts_with(b"llvm.") { + if let Some(intrinsic) = llvm::Intrinsic::lookup(name) { + if !intrinsic.is_overloaded() { + // FIXME: also do this for overloaded intrinsics + FunctionSignature::LLVMSignature(intrinsic, intrinsic.get_type(cx.llcx, &[])) + } else { + FunctionSignature::RustSignature(intrinsic, self.rust_signature(cx)) + } + } else { + // it's one of 2 cases, + // - either the base name is invalid + // - it has been superseded by something else, so the intrinsic was removed entirely + // to check for upgrades, we need the `llfn`, so we defer it for now + FunctionSignature::MaybeInvalid(self.rust_signature(cx)) + } + } else { + FunctionSignature::NotIntrinsic(self.rust_signature(cx)) } } @@ -546,7 +685,17 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } } - fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value) { + fn 
apply_attrs_callsite( + &self, + bx: &mut Builder<'_, 'll, 'tcx>, + callsite: &'ll Value, + llfn: &'ll Value, + ) { + // Don't apply any attributes to LLVM intrinsics, they will be applied by AutoUpgrade + if llvm::get_value_name(llfn).starts_with(b"llvm.") { + return; + } + let mut func_attrs = SmallVec::<[_; 2]>::new(); if self.ret.layout.is_uninhabited() { func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(bx.cx.llcx)); diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index e65f3d292dbb8..192305afa841c 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -68,7 +68,6 @@ impl<'a, 'll> SBuilder<'a, 'll> { ) -> &'ll Value { debug!("call {:?} with args ({:?})", llfn, args); - let args = self.check_call("call", llty, llfn, args); let funclet_bundle = funclet.map(|funclet| funclet.bundle()); let mut bundles: SmallVec<[_; 2]> = SmallVec::new(); if let Some(funclet_bundle) = funclet_bundle { @@ -412,7 +411,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { ) -> &'ll Value { debug!("invoke {:?} with args ({:?})", llfn, args); - let args = self.check_call("invoke", llty, llfn, args); + let args = self.cast_arguments("invoke", llty, llfn, args, fn_abi.is_some()); let funclet_bundle = funclet.map(|funclet| funclet.bundle()); let mut bundles: SmallVec<[_; 2]> = SmallVec::new(); if let Some(funclet_bundle) = funclet_bundle { @@ -443,9 +442,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { ) }; if let Some(fn_abi) = fn_abi { - fn_abi.apply_attrs_callsite(self, invoke); + fn_abi.apply_attrs_callsite(self, invoke, llfn); + self.cast_return(fn_abi, llfn, invoke) + } else { + invoke } - invoke } fn unreachable(&mut self) { @@ -1393,7 +1394,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { ) -> &'ll Value { debug!("call {:?} with args ({:?})", llfn, args); - let args = self.check_call("call", llty, llfn, args); + let args = self.cast_arguments("call", llty, llfn, args, fn_abi.is_some()); let funclet_bundle = funclet.map(|funclet| funclet.bundle()); let mut bundles: SmallVec<[_; 2]> = SmallVec::new(); if let Some(funclet_bundle) = funclet_bundle { @@ -1445,9 +1446,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } if let Some(fn_abi) = fn_abi { - fn_abi.apply_attrs_callsite(self, call); + fn_abi.apply_attrs_callsite(self, call, llfn); + self.cast_return(fn_abi, llfn, call) + } else { + call } - call } fn tail_call( @@ -1628,47 +1631,6 @@ impl<'a, 'll, CX: Borrow>> GenericBuilder<'a, 'll, CX> { ret.expect("LLVM does not have support for catchret") } - fn check_call<'b>( - &mut self, - typ: &str, - fn_ty: &'ll Type, - llfn: &'ll Value, - args: &'b [&'ll Value], - ) -> Cow<'b, [&'ll Value]> { - assert!( - self.cx.type_kind(fn_ty) == TypeKind::Function, - "builder::{typ} not passed a function, but {fn_ty:?}" - ); - - let param_tys = self.cx.func_params_types(fn_ty); - - let all_args_match = iter::zip(¶m_tys, args.iter().map(|&v| self.cx.val_ty(v))) - .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty); - - if all_args_match { - return Cow::Borrowed(args); - } - - let casted_args: Vec<_> = iter::zip(param_tys, args) - .enumerate() - .map(|(i, (expected_ty, &actual_val))| { - let actual_ty = self.cx.val_ty(actual_val); - if expected_ty != actual_ty { - debug!( - "type mismatch in function call of {:?}. 
\
-                         Expected {:?} for param {}, got {:?}; injecting bitcast",
-                        llfn, expected_ty, i, actual_ty
-                    );
-                    self.bitcast(actual_val, expected_ty)
-                } else {
-                    actual_val
-                }
-            })
-            .collect();
-
-        Cow::Owned(casted_args)
-    }
-
     pub(crate) fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value {
         unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) }
     }
@@ -1724,6 +1686,140 @@ impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> {
     }
 }
 impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
+    fn trunc_int_to_i1_vector(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
+        let vector_length = self.vector_length(dest_ty) as u64;
+        let int_width = vector_length.next_power_of_two().max(8);
+
+        let bitcasted = self.bitcast(val, self.type_vector(self.type_i1(), int_width));
+        if vector_length == int_width {
+            bitcasted
+        } else {
+            let shuffle_mask =
+                (0..vector_length).map(|i| self.const_i32(i as i32)).collect::<Vec<_>>();
+            self.shuffle_vector(bitcasted, bitcasted, self.const_vector(&shuffle_mask))
+        }
+    }
+
+    fn zext_i1_vector_to_int(
+        &mut self,
+        mut val: &'ll Value,
+        src_ty: &'ll Type,
+        dest_ty: &'ll Type,
+    ) -> &'ll Value {
+        let vector_length = self.vector_length(src_ty) as u64;
+        let int_width = vector_length.next_power_of_two().max(8);
+
+        if vector_length != int_width {
+            let shuffle_indices = match vector_length {
+                0 => unreachable!("zero length vectors are not allowed"),
+                1 => vec![0, 1, 1, 1, 1, 1, 1, 1],
+                2 => vec![0, 1, 2, 3, 2, 3, 2, 3],
+                3 => vec![0, 1, 2, 3, 4, 5, 3, 4],
+                4.. => (0..int_width as i32).collect(),
+            };
+            let shuffle_mask =
+                shuffle_indices.into_iter().map(|i| self.const_i32(i)).collect::<Vec<_>>();
+            val =
+                self.shuffle_vector(val, self.const_null(src_ty), self.const_vector(&shuffle_mask));
+        }
+
+        self.bitcast(val, dest_ty)
+    }
+
+    fn autocast(
+        &mut self,
+        llfn: &'ll Value,
+        val: &'ll Value,
+        src_ty: &'ll Type,
+        dest_ty: &'ll Type,
+        is_argument: bool,
+    ) -> &'ll Value {
+        let (rust_ty, llvm_ty) = if is_argument { (src_ty, dest_ty) } else { (dest_ty, src_ty) };
+
+        if rust_ty == llvm_ty {
+            return val;
+        }
+
+        match self.type_kind(llvm_ty) {
+            TypeKind::Struct => {
+                let mut ret = self.const_poison(dest_ty);
+                for (idx, (src_element_ty, dest_element_ty)) in
+                    iter::zip(self.struct_element_types(src_ty), self.struct_element_types(dest_ty))
+                        .enumerate()
+                {
+                    let elt = self.extract_value(val, idx as u64);
+                    let casted_elt =
+                        self.autocast(llfn, elt, src_element_ty, dest_element_ty, is_argument);
+                    ret = self.insert_value(ret, casted_elt, idx as u64);
+                }
+                ret
+            }
+            TypeKind::Vector if self.element_type(llvm_ty) == self.type_i1() => {
+                if is_argument {
+                    self.trunc_int_to_i1_vector(val, dest_ty)
+                } else {
+                    self.zext_i1_vector_to_int(val, src_ty, dest_ty)
+                }
+            }
+            _ => self.bitcast(val, dest_ty), // for `bf16(xN)` <-> `u16(xN)`
+        }
+    }
+
+    fn cast_arguments<'b>(
+        &mut self,
+        typ: &str,
+        fn_ty: &'ll Type,
+        llfn: &'ll Value,
+        args: &'b [&'ll Value],
+        has_fnabi: bool,
+    ) -> Cow<'b, [&'ll Value]> {
+        assert_eq!(
+            self.type_kind(fn_ty),
+            TypeKind::Function,
+            "{typ} not passed a function, but {fn_ty:?}"
+        );
+
+        let param_tys = self.func_params_types(fn_ty);
+
+        let mut casted_args = Cow::Borrowed(args);
+
+        for (idx, (dest_ty, &arg)) in iter::zip(param_tys, args).enumerate() {
+            let src_ty = self.val_ty(arg);
+            assert!(
+                self.equate_ty(src_ty, dest_ty),
+                "Cannot match `{dest_ty:?}` (expected) with `{src_ty:?}` (found) in `{llfn:?}`"
+            );
+
+            let casted_arg = self.autocast(llfn, arg, src_ty, dest_ty, true);
+            if arg != casted_arg
{ + assert!( + has_fnabi, + "Should inject autocasts in function call of {llfn:?}, but not able to get Rust signature" + ); + + casted_args.to_mut()[idx] = casted_arg; + } + } + + casted_args + } + + fn cast_return( + &mut self, + fn_abi: &FnAbi<'tcx, Ty<'tcx>>, + llfn: &'ll Value, + ret: &'ll Value, + ) -> &'ll Value { + let src_ty = self.val_ty(ret); + let dest_ty = fn_abi.llvm_return_type(self); + assert!( + self.equate_ty(dest_ty, src_ty), + "Cannot match `{src_ty:?}` (expected) with `{dest_ty:?}` (found) in `{llfn:?}`" + ); + + self.autocast(llfn, ret, src_ty, dest_ty, false) + } + pub(crate) fn landing_pad( &mut self, ty: &'ll Type, @@ -1753,7 +1849,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { ) -> &'ll Value { debug!("invoke {:?} with args ({:?})", llfn, args); - let args = self.check_call("callbr", llty, llfn, args); + let args = self.cast_arguments("callbr", llty, llfn, args, fn_abi.is_some()); let funclet_bundle = funclet.map(|funclet| funclet.bundle()); let mut bundles: SmallVec<[_; 2]> = SmallVec::new(); if let Some(funclet_bundle) = funclet_bundle { @@ -1785,9 +1881,11 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { ) }; if let Some(fn_abi) = fn_abi { - fn_abi.apply_attrs_callsite(self, callbr); + fn_abi.apply_attrs_callsite(self, callbr, llfn); + self.cast_return(fn_abi, llfn, callbr) + } else { + callbr } - callbr } // Emits CFI pointer type membership tests. diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs index 48b2d09afb674..83e3f28d45d68 100644 --- a/compiler/rustc_codegen_llvm/src/declare.rs +++ b/compiler/rustc_codegen_llvm/src/declare.rs @@ -18,16 +18,17 @@ use rustc_codegen_ssa::traits::TypeMembershipCodegenMethods; use rustc_data_structures::fx::FxIndexSet; use rustc_middle::ty::{Instance, Ty}; use rustc_sanitizers::{cfi, kcfi}; +use rustc_session::lint::builtin::{DEPRECATED_LLVM_INTRINSIC, UNKNOWN_LLVM_INTRINSIC}; use rustc_target::callconv::FnAbi; use smallvec::SmallVec; use tracing::debug; -use crate::abi::FnAbiLlvmExt; -use crate::attributes; +use crate::abi::{FnAbiLlvmExt, FunctionSignature}; use crate::common::AsCCharPtr; use crate::context::{CodegenCx, GenericCx, SCx, SimpleCx}; use crate::llvm::AttributePlace::Function; use crate::llvm::{self, FromGeneric, Type, Value, Visibility}; +use crate::{attributes, errors}; /// Declare a function with a SimpleCx. /// @@ -148,6 +149,22 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { ) -> &'ll Value { debug!("declare_rust_fn(name={:?}, fn_abi={:?})", name, fn_abi); + let signature = fn_abi.llvm_type(self, name.as_bytes()); + + let span = || instance.map(|instance| self.tcx.def_span(instance.def_id())); + + if let FunctionSignature::LLVMSignature(_, llvm_fn_ty) = signature { + // check if the intrinsic signatures match + if !fn_abi.verify_intrinsic_signature(self, llvm_fn_ty) { + self.tcx.dcx().emit_fatal(errors::IntrinsicSignatureMismatch { + name, + llvm_fn_ty: &format!("{llvm_fn_ty:?}"), + rust_fn_ty: &format!("{:?}", fn_abi.rust_signature(self)), + span: span(), + }); + } + } + // Function addresses in Rust are never significant, allowing functions to // be merged. 
let llfn = declare_raw_fn( @@ -156,9 +173,76 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { fn_abi.llvm_cconv(self), llvm::UnnamedAddr::Global, llvm::Visibility::Default, - fn_abi.llvm_type(self), + signature.fn_ty(), ); - fn_abi.apply_attrs_llfn(self, llfn, instance); + + if let Some(intrinsic) = signature.intrinsic() { + if intrinsic.is_target_specific() { + let (llvm_arch, _) = name[5..].split_once('.').unwrap(); + let target_arch = self.tcx.sess.target.arch.as_ref(); + + let is_correct_arch = match llvm_arch { + "aarch64" => matches!(target_arch, "aarch64" | "arm64ec"), + "amdgcn" => target_arch == "amdgpu", + "arm" | "bpf" | "hexagon" => target_arch == llvm_arch, + "loongarch" => matches!(target_arch, "loongarch32" | "loongarch64"), + "mips" => target_arch.starts_with("mips"), + "nvvm" => target_arch == "nvptx64", + "ppc" => matches!(target_arch, "powerpc" | "powerpc64"), + "riscv" => matches!(target_arch, "riscv32" | "riscv64"), + "s390" => target_arch == "s390x", + "spv" => target_arch == "spirv", + "wasm" => matches!(target_arch, "wasm32" | "wasm64"), + "x86" => matches!(target_arch, "x86" | "x86_64"), + _ => true, // fallback for unknown archs + }; + + if !is_correct_arch { + self.tcx.dcx().emit_fatal(errors::IntrinsicWrongArch { + name, + target_arch, + span: span(), + }); + } + } + } else { + // Don't apply any attributes to intrinsics, they will be applied by AutoUpgrade + fn_abi.apply_attrs_llfn(self, llfn, instance); + } + + if let FunctionSignature::MaybeInvalid(..) = signature { + let mut new_llfn = None; + let can_upgrade = + unsafe { llvm::LLVMRustUpgradeIntrinsicFunction(llfn, &mut new_llfn, false) }; + + // we can emit diagnostics for local crates only + if let Some(instance) = instance + && let Some(local_def_id) = instance.def_id().as_local() + { + let hir_id = self.tcx.local_def_id_to_hir_id(local_def_id); + let span = self.tcx.def_span(local_def_id); + + if can_upgrade { + // not all intrinsics are upgraded to some other intrinsics, most are upgraded to instruction sequences + let msg = if let Some(new_llfn) = new_llfn { + format!( + "using deprecated intrinsic `{name}`, `{}` can be used instead", + str::from_utf8(&llvm::get_value_name(new_llfn)).unwrap() + ) + } else { + format!("using deprecated intrinsic `{name}`") + }; + self.tcx.node_lint(DEPRECATED_LLVM_INTRINSIC, hir_id, |d| { + d.primary_message(msg).span(span); + }); + } else { + // This is either plain wrong, or this can be caused by incompatible LLVM versions, we let the user decide + self.tcx.node_lint(UNKNOWN_LLVM_INTRINSIC, hir_id, |d| { + d.primary_message(format!("invalid LLVM Intrinsic `{name}`")).span(span); + }); + } + } + } if self.tcx.sess.is_sanitizer_cfi_enabled() { if let Some(instance) = instance { diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs index 627b0c9ff3b33..22d7aeaac9aa0 100644 --- a/compiler/rustc_codegen_llvm/src/errors.rs +++ b/compiler/rustc_codegen_llvm/src/errors.rs @@ -147,3 +147,22 @@ pub(crate) struct FixedX18InvalidArch<'a> { #[derive(Diagnostic)] #[diag(codegen_llvm_sanitizer_kcfi_arity_requires_llvm_21_0_0)] pub(crate) struct SanitizerKcfiArityRequiresLLVM2100; + +#[derive(Diagnostic)] +#[diag(codegen_llvm_intrinsic_signature_mismatch)] +pub(crate) struct IntrinsicSignatureMismatch<'a> { + pub name: &'a str, + pub llvm_fn_ty: &'a str, + pub rust_fn_ty: &'a str, + #[primary_span] + pub span: Option, +} + +#[derive(Diagnostic)] +#[diag(codegen_llvm_intrinsic_wrong_arch)] +pub(crate) struct IntrinsicWrongArch<'a> { + pub name: 
&'a str, + pub target_arch: &'a str, + #[primary_span] + pub span: Option, +} diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index c12383f19312d..b25c6cc4be0ee 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -1073,7 +1073,7 @@ fn gen_fn<'a, 'll, 'tcx>( codegen: &mut dyn FnMut(Builder<'a, 'll, 'tcx>), ) -> (&'ll Type, &'ll Value) { let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty()); - let llty = fn_abi.llvm_type(cx); + let llty = fn_abi.llvm_type(cx, name.as_bytes()).fn_ty(); let llfn = cx.declare_fn(name, fn_abi, None); cx.set_frame_pointer_type(llfn); cx.apply_target_cpu_attr(llfn); diff --git a/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs index e63043b21227f..496639bb8b6f4 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs @@ -73,7 +73,6 @@ unsafe extern "C" { pub(crate) fn LLVMDumpModule(M: &Module); pub(crate) fn LLVMDumpValue(V: &Value); pub(crate) fn LLVMGetFunctionCallConv(F: &Value) -> c_uint; - pub(crate) fn LLVMGetReturnType(T: &Type) -> &Type; pub(crate) fn LLVMGetParams(Fnc: &Value, params: *mut &Value); pub(crate) fn LLVMGetNamedFunction(M: &Module, Name: *const c_char) -> Option<&Value>; } diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index c4b5cf413a725..e620cd9959100 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -963,6 +963,9 @@ unsafe extern "C" { pub(crate) fn LLVMDoubleTypeInContext(C: &Context) -> &Type; pub(crate) fn LLVMFP128TypeInContext(C: &Context) -> &Type; + // Operations on non-IEEE real types + pub(crate) fn LLVMBFloatTypeInContext(C: &Context) -> &Type; + // Operations on function types pub(crate) fn LLVMFunctionType<'a>( ReturnType: &'a Type, @@ -972,6 +975,8 @@ unsafe extern "C" { ) -> &'a Type; pub(crate) fn LLVMCountParamTypes(FunctionTy: &Type) -> c_uint; pub(crate) fn LLVMGetParamTypes<'a>(FunctionTy: &'a Type, Dest: *mut &'a Type); + pub(crate) fn LLVMGetReturnType(FunctionTy: &Type) -> &Type; + pub(crate) fn LLVMIsFunctionVarArg(FunctionTy: &Type) -> Bool; // Operations on struct types pub(crate) fn LLVMStructTypeInContext<'a>( @@ -1119,12 +1124,25 @@ unsafe extern "C" { // Operations about llvm intrinsics pub(crate) fn LLVMLookupIntrinsicID(Name: *const c_char, NameLen: size_t) -> c_uint; + pub(crate) fn LLVMIntrinsicIsOverloaded(ID: NonZero) -> Bool; + pub(crate) fn LLVMIntrinsicGetType<'a>( + C: &'a Context, + ID: NonZero, + ParamTypes: *const &'a Type, + ParamCount: size_t, + ) -> &'a Type; pub(crate) fn LLVMGetIntrinsicDeclaration<'a>( Mod: &'a Module, ID: NonZero, ParamTypes: *const &'a Type, ParamCount: size_t, ) -> &'a Value; + pub(crate) fn LLVMRustUpgradeIntrinsicFunction<'a>( + Fn: &'a Value, + NewFn: &mut Option<&'a Value>, + CanUpgradeDebugIntrinsicsToRecords: bool, + ) -> bool; + pub(crate) fn LLVMRustIsTargetIntrinsic(ID: NonZero) -> bool; // Operations on parameters pub(crate) fn LLVMIsAArgument(Val: &Value) -> Option<&Value>; @@ -1641,6 +1659,9 @@ unsafe extern "C" { Packed: Bool, ); + pub(crate) fn LLVMCountStructElementTypes(StructTy: &Type) -> c_uint; + pub(crate) fn LLVMGetStructElementTypes<'a>(StructTy: &'a Type, Dest: *mut &'a Type); + pub(crate) safe fn LLVMMetadataAsValue<'a>(C: &'a Context, MD: &'a Metadata) -> &'a Value; pub(crate) safe fn 
LLVMSetUnnamedAddress(Global: &Value, UnnamedAddr: UnnamedAddr); diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs index 4c58a92106d5c..fedeed8b53c30 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs @@ -311,6 +311,18 @@ impl Intrinsic { NonZero::new(id).map(|id| Self { id }) } + pub(crate) fn is_overloaded(self) -> bool { + unsafe { LLVMIntrinsicIsOverloaded(self.id).is_true() } + } + + pub(crate) fn is_target_specific(self) -> bool { + unsafe { LLVMRustIsTargetIntrinsic(self.id) } + } + + pub(crate) fn get_type<'ll>(self, llcx: &'ll Context, type_params: &[&'ll Type]) -> &'ll Type { + unsafe { LLVMIntrinsicGetType(llcx, self.id, type_params.as_ptr(), type_params.len()) } + } + pub(crate) fn get_declaration<'ll>( self, llmod: &'ll Module, diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs index 81bb70c958790..f7e3754a82c32 100644 --- a/compiler/rustc_codegen_llvm/src/type_.rs +++ b/compiler/rustc_codegen_llvm/src/type_.rs @@ -68,6 +68,10 @@ impl<'ll, CX: Borrow>> GenericCx<'ll, CX> { unsafe { llvm::LLVMVectorType(ty, len as c_uint) } } + pub(crate) fn get_return_type(&self, ty: &'ll Type) -> &'ll Type { + unsafe { llvm::LLVMGetReturnType(ty) } + } + pub(crate) fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> { unsafe { let n_args = llvm::LLVMCountParamTypes(ty) as usize; @@ -77,6 +81,20 @@ impl<'ll, CX: Borrow>> GenericCx<'ll, CX> { args } } + + pub(crate) fn func_is_variadic(&self, ty: &'ll Type) -> bool { + unsafe { llvm::LLVMIsFunctionVarArg(ty).is_true() } + } + + pub(crate) fn struct_element_types(&self, ty: &'ll Type) -> Vec<&'ll Type> { + unsafe { + let n_args = llvm::LLVMCountStructElementTypes(ty) as usize; + let mut args = Vec::with_capacity(n_args); + llvm::LLVMGetStructElementTypes(ty, args.as_mut_ptr()); + args.set_len(n_args); + args + } + } } impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { pub(crate) fn type_bool(&self) -> &'ll Type { @@ -156,6 +174,10 @@ impl<'ll, CX: Borrow>> GenericCx<'ll, CX> { ) } } + + pub(crate) fn type_bf16(&self) -> &'ll Type { + unsafe { llvm::LLVMBFloatTypeInContext(self.llcx()) } + } } impl<'ll, CX: Borrow>> BaseTypeCodegenMethods for GenericCx<'ll, CX> { @@ -229,7 +251,7 @@ impl<'ll, CX: Borrow>> BaseTypeCodegenMethods for GenericCx<'ll, CX> { fn float_width(&self, ty: &'ll Type) -> usize { match self.type_kind(ty) { - TypeKind::Half => 16, + TypeKind::Half | TypeKind::BFloat => 16, TypeKind::Float => 32, TypeKind::Double => 64, TypeKind::X86_FP80 => 80, @@ -286,8 +308,12 @@ impl<'ll, 'tcx> LayoutTypeCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type { ty.llvm_type(self) } - fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type { - fn_abi.llvm_type(self) + fn fn_decl_backend_type( + &self, + fn_abi: &FnAbi<'tcx, Ty<'tcx>>, + fn_ptr: &'ll Value, + ) -> &'ll Type { + fn_abi.llvm_type(self, &llvm::get_value_name(fn_ptr)).fn_ty() } fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type { fn_abi.ptr_to_llvm_type(self) diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index e371f1a76231c..ecce2d2a705e5 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -197,7 +197,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> { // If there is a cleanup block and the function we're calling can unwind, 
then // do an invoke, otherwise do a call. - let fn_ty = bx.fn_decl_backend_type(fn_abi); + let fn_ty = bx.fn_decl_backend_type(fn_abi, fn_ptr); let fn_attrs = if bx.tcx().def_kind(fx.instance.def_id()).has_codegen_attrs() { Some(bx.tcx().codegen_fn_attrs(fx.instance.def_id())) @@ -1846,7 +1846,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { if is_call_from_compiler_builtins_to_upstream_monomorphization(bx.tcx(), instance) { bx.abort(); } else { - let fn_ty = bx.fn_decl_backend_type(fn_abi); + let fn_ty = bx.fn_decl_backend_type(fn_abi, fn_ptr); let llret = bx.call(fn_ty, None, Some(fn_abi), fn_ptr, &[], funclet.as_ref(), None); bx.apply_attrs_to_cleanup_callsite(llret); diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index d629003bff5e8..20992464ae3ab 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -658,7 +658,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { }; let fn_ptr = bx.get_fn_addr(instance); let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty()); - let fn_ty = bx.fn_decl_backend_type(fn_abi); + let fn_ty = bx.fn_decl_backend_type(fn_abi, fn_ptr); let fn_attrs = if bx.tcx().def_kind(instance.def_id()).has_codegen_attrs() { Some(bx.tcx().codegen_instance_attrs(instance.def)) } else { diff --git a/compiler/rustc_codegen_ssa/src/size_of_val.rs b/compiler/rustc_codegen_ssa/src/size_of_val.rs index e1bd8014d7a2f..adebb297bd0ba 100644 --- a/compiler/rustc_codegen_ssa/src/size_of_val.rs +++ b/compiler/rustc_codegen_ssa/src/size_of_val.rs @@ -68,7 +68,7 @@ pub fn size_and_align_of_dst<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( // Generate the call. Cannot use `do_call` since we don't have a MIR terminator so we // can't create a `TerminationCodegenHelper`. (But we are in good company, this code is // duplicated plenty of times.) - let fn_ty = bx.fn_decl_backend_type(fn_abi); + let fn_ty = bx.fn_decl_backend_type(fn_abi, llfn); bx.call( fn_ty, diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs index 32c24965e1bf6..3caa959ef3d25 100644 --- a/compiler/rustc_codegen_ssa/src/traits/type_.rs +++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs @@ -114,7 +114,11 @@ pub trait LayoutTypeCodegenMethods<'tcx>: BackendTypes { /// such as when it's stack-allocated or when it's being loaded or stored. fn backend_type(&self, layout: TyAndLayout<'tcx>) -> Self::Type; fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type; - fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Type; + fn fn_decl_backend_type( + &self, + fn_abi: &FnAbi<'tcx, Ty<'tcx>>, + fn_ptr: Self::Value, + ) -> Self::Type; fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Type; fn reg_backend_type(&self, ty: &Reg) -> Self::Type; /// The backend type used for a rust type when it's in an SSA register. diff --git a/compiler/rustc_lint_defs/src/builtin.rs b/compiler/rustc_lint_defs/src/builtin.rs index 939f3d088b12f..5f56af5c9abf6 100644 --- a/compiler/rustc_lint_defs/src/builtin.rs +++ b/compiler/rustc_lint_defs/src/builtin.rs @@ -35,6 +35,7 @@ declare_lint_pass! { DEPENDENCY_ON_UNIT_NEVER_TYPE_FALLBACK, DEPRECATED, DEPRECATED_IN_FUTURE, + DEPRECATED_LLVM_INTRINSIC, DEPRECATED_SAFE_2024, DEPRECATED_WHERE_CLAUSE_LOCATION, DUPLICATE_MACRO_ATTRIBUTES, @@ -117,6 +118,7 @@ declare_lint_pass! 
{
     UNKNOWN_CRATE_TYPES,
     UNKNOWN_DIAGNOSTIC_ATTRIBUTES,
     UNKNOWN_LINTS,
+    UNKNOWN_LLVM_INTRINSIC,
     UNNAMEABLE_TEST_ITEMS,
     UNNAMEABLE_TYPES,
     UNREACHABLE_CODE,
@@ -5197,3 +5199,79 @@ declare_lint! {
     Warn,
     r#"detects when a function annotated with `#[inline(always)]` and `#[target_feature(enable = "..")]` is inlined into a caller without the required target feature"#,
 }
+
+declare_lint! {
+    /// The `unknown_llvm_intrinsic` lint detects usage of unknown LLVM intrinsics.
+    ///
+    /// ### Example
+    ///
+    /// ```rust,compile_fail
+    /// #![feature(link_llvm_intrinsics, abi_unadjusted)]
+    ///
+    /// unsafe extern "unadjusted" {
+    ///     #[link_name = "llvm.abcde"]
+    ///     fn foo();
+    /// }
+    ///
+    /// #[inline(never)]
+    /// pub fn main() {
+    ///     unsafe { foo() }
+    /// }
+    /// ```
+    ///
+    /// {{produces}}
+    ///
+    /// ### Explanation
+    ///
+    /// Linking to an unknown LLVM intrinsic may cause linker errors (in general it is
+    /// undefined behavior), so this lint captures those undesirable scenarios.
+    pub UNKNOWN_LLVM_INTRINSIC,
+    Deny,
+    "detects uses of unknown LLVM intrinsics",
+    @feature_gate = link_llvm_intrinsics;
+}
+
+declare_lint! {
+    /// The `deprecated_llvm_intrinsic` lint detects usage of deprecated LLVM intrinsics.
+    ///
+    /// ### Example
+    ///
+    /// ```rust,ignore (requires x86)
+    /// #![cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+    /// #![feature(link_llvm_intrinsics, abi_unadjusted)]
+    /// #![deny(deprecated_llvm_intrinsic)]
+    ///
+    /// unsafe extern "unadjusted" {
+    ///     #[link_name = "llvm.x86.addcarryx.u32"]
+    ///     fn foo(a: u8, b: u32, c: u32, d: &mut u32) -> u8;
+    /// }
+    ///
+    /// #[inline(never)]
+    /// #[target_feature(enable = "adx")]
+    /// pub fn bar(a: u8, b: u32, c: u32, d: &mut u32) -> u8 {
+    ///     unsafe { foo(a, b, c, d) }
+    /// }
+    /// ```
+    ///
+    /// This will produce:
+    ///
+    /// ```text
+    /// error: using deprecated intrinsic `llvm.x86.addcarryx.u32`
+    ///  --> example.rs:7:5
+    ///   |
+    /// 7 | fn foo(a: u8, b: u32, c: u32, d: &mut u32) -> u8;
+    ///   | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    ///   |
+    /// ```
+    ///
+    /// ### Explanation
+    ///
+    /// LLVM periodically updates its list of intrinsics. Deprecated intrinsics are unlikely
+    /// to be removed right away, but they may optimize less well than their replacements, so
+    /// it's best to use the new version.
Also, some deprecated intrinsics might have buggy + /// behavior + pub DEPRECATED_LLVM_INTRINSIC, + Allow, + "detects uses of deprecated LLVM intrinsics", + @feature_gate = link_llvm_intrinsics; +} diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp index e38474f09ff7a..c342f8546def3 100644 --- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp @@ -9,6 +9,7 @@ #include "llvm/ADT/StringRef.h" #include "llvm/BinaryFormat/Magic.h" #include "llvm/Bitcode/BitcodeWriter.h" +#include "llvm/IR/AutoUpgrade.h" #include "llvm/IR/DIBuilder.h" #include "llvm/IR/DebugInfoMetadata.h" #include "llvm/IR/DiagnosticHandler.h" @@ -1765,6 +1766,25 @@ extern "C" void LLVMRustGetMangledName(LLVMValueRef V, RustStringRef Str) { Mangler().getNameWithPrefix(OS, GV, true); } +extern "C" bool +LLVMRustUpgradeIntrinsicFunction(LLVMValueRef Fn, LLVMValueRef *NewFn, + bool canUpgradeDebugIntrinsicsToRecords) { + Function *F = unwrap(Fn); + Function *NewF = nullptr; + bool CanUpgrade = + UpgradeIntrinsicFunction(F, NewF, canUpgradeDebugIntrinsicsToRecords); + *NewFn = wrap(NewF); + return CanUpgrade; +} + +extern "C" bool LLVMRustIsTargetIntrinsic(unsigned ID) { +#if LLVM_VERSION_GE(20, 1) + return Intrinsic::isTargetIntrinsic(ID); +#else + return Function::isTargetIntrinsic(ID); +#endif +} + extern "C" int32_t LLVMRustGetElementTypeArgIndex(LLVMValueRef CallSite) { auto *CB = unwrap(CallSite); switch (CB->getIntrinsicID()) { diff --git a/tests/codegen-llvm/inject-autocast.rs b/tests/codegen-llvm/inject-autocast.rs new file mode 100644 index 0000000000000..d81c67e8a2df9 --- /dev/null +++ b/tests/codegen-llvm/inject-autocast.rs @@ -0,0 +1,97 @@ +//@ compile-flags: -C opt-level=0 +//@ only-x86_64 + +#![feature(link_llvm_intrinsics, abi_unadjusted, repr_simd, simd_ffi, portable_simd, f16)] +#![crate_type = "lib"] + +use std::simd::{f32x4, i16x8, i64x2}; + +#[repr(simd)] +pub struct Tile([i8; 1024]); + +#[repr(C, packed)] +pub struct Bar(u32, i64x2, i64x2, i64x2, i64x2, i64x2, i64x2); +// CHECK: %Bar = type <{ i32, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> }> + +#[repr(simd)] +pub struct f16x8([f16; 8]); + +// CHECK-LABEL: @struct_with_i1_vector_autocast +#[no_mangle] +pub unsafe fn struct_with_i1_vector_autocast(a: i64x2, b: i64x2) -> (u8, u8) { + extern "unadjusted" { + #[link_name = "llvm.x86.avx512.vp2intersect.q.128"] + fn foo(a: i64x2, b: i64x2) -> (u8, u8); + } + + // CHECK: [[A:%[0-9]+]] = call { <2 x i1>, <2 x i1> } @llvm.x86.avx512.vp2intersect.q.128(<2 x i64> {{.*}}, <2 x i64> {{.*}}) + // CHECK: [[B:%[0-9]+]] = extractvalue { <2 x i1>, <2 x i1> } [[A]], 0 + // CHECK: [[C:%[0-9]+]] = shufflevector <2 x i1> [[B]], <2 x i1> zeroinitializer, <8 x i32> + // CHECK: [[D:%[0-9]+]] = bitcast <8 x i1> [[C]] to i8 + // CHECK: [[E:%[0-9]+]] = insertvalue { i8, i8 } poison, i8 [[D]], 0 + // CHECK: [[F:%[0-9]+]] = extractvalue { <2 x i1>, <2 x i1> } [[A]], 1 + // CHECK: [[G:%[0-9]+]] = shufflevector <2 x i1> [[F]], <2 x i1> zeroinitializer, <8 x i32> + // CHECK: [[H:%[0-9]+]] = bitcast <8 x i1> [[G]] to i8 + // CHECK: insertvalue { i8, i8 } [[E]], i8 [[H]], 1 + foo(a, b) +} + +// CHECK-LABEL: @bf16_vector_autocast +#[no_mangle] +pub unsafe fn bf16_vector_autocast(a: f32x4) -> i16x8 { + extern "unadjusted" { + #[link_name = "llvm.x86.vcvtneps2bf16128"] + fn foo(a: f32x4) -> i16x8; + } + + // CHECK: [[A:%[0-9]+]] = call <8 x bfloat> @llvm.x86.vcvtneps2bf16128(<4 x float> {{.*}}) + // CHECK: 
bitcast <8 x bfloat> [[A]] to <8 x i16> + foo(a) +} + +// CHECK-LABEL: @struct_autocast +#[no_mangle] +pub unsafe fn struct_autocast(key_metadata: u32, key: i64x2) -> Bar { + extern "unadjusted" { + #[link_name = "llvm.x86.encodekey128"] + fn foo(key_metadata: u32, key: i64x2) -> Bar; + } + + // CHECK: [[A:%[0-9]+]] = call { i32, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.x86.encodekey128(i32 {{.*}}, <2 x i64> {{.*}}) + // CHECK: [[B:%[0-9]+]] = extractvalue { i32, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } [[A]], 0 + // CHECK: [[C:%[0-9]+]] = insertvalue %Bar poison, i32 [[B]], 0 + // CHECK: [[D:%[0-9]+]] = extractvalue { i32, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } [[A]], 1 + // CHECK: [[E:%[0-9]+]] = insertvalue %Bar [[C]], <2 x i64> [[D]], 1 + // CHECK: [[F:%[0-9]+]] = extractvalue { i32, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } [[A]], 2 + // CHECK: [[G:%[0-9]+]] = insertvalue %Bar [[E]], <2 x i64> [[F]], 2 + // CHECK: [[H:%[0-9]+]] = extractvalue { i32, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } [[A]], 3 + // CHECK: [[I:%[0-9]+]] = insertvalue %Bar [[G]], <2 x i64> [[H]], 3 + // CHECK: [[J:%[0-9]+]] = extractvalue { i32, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } [[A]], 4 + // CHECK: [[K:%[0-9]+]] = insertvalue %Bar [[I]], <2 x i64> [[J]], 4 + // CHECK: [[L:%[0-9]+]] = extractvalue { i32, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } [[A]], 5 + // CHECK: [[M:%[0-9]+]] = insertvalue %Bar [[K]], <2 x i64> [[L]], 5 + // CHECK: [[N:%[0-9]+]] = extractvalue { i32, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } [[A]], 6 + // CHECK: insertvalue %Bar [[M]], <2 x i64> [[N]], 6 + foo(key_metadata, key) +} + +// CHECK-LABEL: @i1_vector_autocast +#[no_mangle] +pub unsafe fn i1_vector_autocast(a: f16x8) -> u8 { + extern "unadjusted" { + #[link_name = "llvm.x86.avx512fp16.fpclass.ph.128"] + fn foo(a: f16x8, b: i32) -> u8; + } + + // CHECK: [[A:%[0-9]+]] = call <8 x i1> @llvm.x86.avx512fp16.fpclass.ph.128(<8 x half> {{.*}}, i32 1) + // CHECK: bitcast <8 x i1> [[A]] to i8 + foo(a, 1) +} + +// CHECK: declare { <2 x i1>, <2 x i1> } @llvm.x86.avx512.vp2intersect.q.128(<2 x i64>, <2 x i64>) + +// CHECK: declare <8 x bfloat> @llvm.x86.vcvtneps2bf16128(<4 x float>) + +// CHECK: declare { i32, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64>, <2 x i64> } @llvm.x86.encodekey128(i32, <2 x i64>) + +// CHECK: declare <8 x i1> @llvm.x86.avx512fp16.fpclass.ph.128(<8 x half>, i32 immarg) diff --git a/tests/run-make/simd-ffi/simd.rs b/tests/run-make/simd-ffi/simd.rs index 1cd961ff87e7b..3f12dabdb65ea 100644 --- a/tests/run-make/simd-ffi/simd.rs +++ b/tests/run-make/simd-ffi/simd.rs @@ -35,7 +35,7 @@ extern "C" { fn integer(a: i32x4, b: i32x4) -> i32x4; // vmaxq_s32 #[cfg(target_arch = "aarch64")] - #[link_name = "llvm.aarch64.neon.maxs.v4i32"] + #[link_name = "llvm.aarch64.neon.smax.v4i32"] fn integer(a: i32x4, b: i32x4) -> i32x4; // Use a generic LLVM intrinsic to do type checking on other platforms diff --git a/tests/ui/codegen/deprecated-llvm-intrinsic.rs b/tests/ui/codegen/deprecated-llvm-intrinsic.rs new file mode 100644 index 0000000000000..9affa7ebd275c --- /dev/null +++ b/tests/ui/codegen/deprecated-llvm-intrinsic.rs @@ -0,0 +1,28 @@ +//@ add-core-stubs +//@ build-pass +//@ ignore-pass +//@ compile-flags: --target aarch64-unknown-linux-gnu +//@ needs-llvm-components: aarch64 +#![feature(no_core, lang_items, 
link_llvm_intrinsics, abi_unadjusted, repr_simd, simd_ffi)] +#![no_std] +#![no_core] +#![allow(internal_features, non_camel_case_types, improper_ctypes)] +#![crate_type = "lib"] + +extern crate minicore; +use minicore::*; + +#[repr(simd)] +pub struct i8x8([i8; 8]); + +extern "unadjusted" { + #![warn(deprecated_llvm_intrinsic)] + #[link_name = "llvm.aarch64.neon.rbit.v8i8"] + fn foo(a: i8x8) -> i8x8; + //~^ WARN: using deprecated intrinsic `llvm.aarch64.neon.rbit.v8i8`, `llvm.bitreverse.v8i8` can be used instead +} + +#[target_feature(enable = "neon")] +pub unsafe fn bar(a: i8x8) -> i8x8 { + foo(a) +} diff --git a/tests/ui/codegen/deprecated-llvm-intrinsic.stderr b/tests/ui/codegen/deprecated-llvm-intrinsic.stderr new file mode 100644 index 0000000000000..8348056387eca --- /dev/null +++ b/tests/ui/codegen/deprecated-llvm-intrinsic.stderr @@ -0,0 +1,14 @@ +warning: using deprecated intrinsic `llvm.aarch64.neon.rbit.v8i8`, `llvm.bitreverse.v8i8` can be used instead + --> $DIR/deprecated-llvm-intrinsic.rs:21:5 + | +LL | fn foo(a: i8x8) -> i8x8; + | ^^^^^^^^^^^^^^^^^^^^^^^^ + | +note: the lint level is defined here + --> $DIR/deprecated-llvm-intrinsic.rs:19:13 + | +LL | #![warn(deprecated_llvm_intrinsic)] + | ^^^^^^^^^^^^^^^^^^^^^^^^^ + +warning: 1 warning emitted + diff --git a/tests/ui/codegen/incorrect-arch-intrinsic.rs b/tests/ui/codegen/incorrect-arch-intrinsic.rs new file mode 100644 index 0000000000000..b502fb9d7f7b8 --- /dev/null +++ b/tests/ui/codegen/incorrect-arch-intrinsic.rs @@ -0,0 +1,17 @@ +//@ build-fail +//@ ignore-s390x +//@ normalize-stderr: "target arch `(.*)`" -> "target arch `TARGET_ARCH`" + +#![feature(link_llvm_intrinsics, abi_unadjusted)] + +extern "unadjusted" { + #[link_name = "llvm.s390.sfpc"] + fn foo(a: i32); + //~^ ERROR: intrinsic `llvm.s390.sfpc` cannot be used with target arch +} + +pub fn main() { + unsafe { + foo(0); + } +} diff --git a/tests/ui/codegen/incorrect-arch-intrinsic.stderr b/tests/ui/codegen/incorrect-arch-intrinsic.stderr new file mode 100644 index 0000000000000..4a11a0bfa8bcb --- /dev/null +++ b/tests/ui/codegen/incorrect-arch-intrinsic.stderr @@ -0,0 +1,8 @@ +error: intrinsic `llvm.s390.sfpc` cannot be used with target arch `TARGET_ARCH` + --> $DIR/incorrect-arch-intrinsic.rs:9:5 + | +LL | fn foo(a: i32); + | ^^^^^^^^^^^^^^^ + +error: aborting due to 1 previous error + diff --git a/tests/ui/codegen/incorrect-llvm-intrinsic-signature.rs b/tests/ui/codegen/incorrect-llvm-intrinsic-signature.rs new file mode 100644 index 0000000000000..57d57136fdf23 --- /dev/null +++ b/tests/ui/codegen/incorrect-llvm-intrinsic-signature.rs @@ -0,0 +1,13 @@ +//@ build-fail + +#![feature(link_llvm_intrinsics, abi_unadjusted)] + +extern "unadjusted" { + #[link_name = "llvm.assume"] + fn foo(); + //~^ ERROR: intrinsic signature mismatch for `llvm.assume`: expected signature `void (i1)`, found `void ()` +} + +pub fn main() { + unsafe { foo() } +} diff --git a/tests/ui/codegen/incorrect-llvm-intrinsic-signature.stderr b/tests/ui/codegen/incorrect-llvm-intrinsic-signature.stderr new file mode 100644 index 0000000000000..7499bf00e7593 --- /dev/null +++ b/tests/ui/codegen/incorrect-llvm-intrinsic-signature.stderr @@ -0,0 +1,8 @@ +error: intrinsic signature mismatch for `llvm.assume`: expected signature `void (i1)`, found `void ()` + --> $DIR/incorrect-llvm-intrinsic-signature.rs:7:5 + | +LL | fn foo(); + | ^^^^^^^^^ + +error: aborting due to 1 previous error + diff --git a/tests/ui/codegen/invalid-llvm-intrinsic.rs b/tests/ui/codegen/invalid-llvm-intrinsic.rs new file mode 
100644 index 0000000000000..b9e2d579b908c --- /dev/null +++ b/tests/ui/codegen/invalid-llvm-intrinsic.rs @@ -0,0 +1,13 @@ +//@ build-fail + +#![feature(link_llvm_intrinsics, abi_unadjusted)] + +extern "unadjusted" { + #[link_name = "llvm.abcde"] + fn foo(); + //~^ ERROR: invalid LLVM Intrinsic `llvm.abcde` +} + +pub fn main() { + unsafe { foo() } +} diff --git a/tests/ui/codegen/invalid-llvm-intrinsic.stderr b/tests/ui/codegen/invalid-llvm-intrinsic.stderr new file mode 100644 index 0000000000000..820a7a2e2ea9f --- /dev/null +++ b/tests/ui/codegen/invalid-llvm-intrinsic.stderr @@ -0,0 +1,10 @@ +error: invalid LLVM Intrinsic `llvm.abcde` + --> $DIR/invalid-llvm-intrinsic.rs:7:5 + | +LL | fn foo(); + | ^^^^^^^^^ + | + = note: `#[deny(unknown_llvm_intrinsic)]` on by default + +error: aborting due to 1 previous error +
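
As a quick illustration of the lane-width rule used by `equate_ty` and the `i1`-vector autocasts above: an LLVM `<N x i1>` mask is exchanged with a Rust integer of width `max(next_power_of_two(N), 8)`. A minimal, self-contained sketch of that arithmetic (the helper name is hypothetical and not part of the patch):

```rust
/// Width in bits of the Rust integer that stands in for an LLVM `<N x i1>` mask,
/// mirroring `element_count.next_power_of_two().max(8)` from `equate_ty`.
fn mask_int_width(lanes: u64) -> u64 {
    assert!(lanes > 0, "zero-length vectors are not allowed");
    lanes.next_power_of_two().max(8)
}

fn main() {
    // <2 x i1> -> i8, <8 x i1> -> i8, <16 x i1> -> i16, <17 x i1> -> i32
    assert_eq!(mask_int_width(2), 8);
    assert_eq!(mask_int_width(8), 8);
    assert_eq!(mask_int_width(16), 16);
    assert_eq!(mask_int_width(17), 32);
}
```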