Index: include/llvm/IR/Intrinsics.td =================================================================== --- include/llvm/IR/Intrinsics.td +++ include/llvm/IR/Intrinsics.td @@ -475,11 +475,11 @@ [Throws]>; def int_experimental_patchpoint_void : Intrinsic<[], [llvm_i64_ty, llvm_i32_ty, - llvm_ptr_ty, llvm_i32_ty, + llvm_anyptr_ty, llvm_i32_ty, llvm_vararg_ty]>; def int_experimental_patchpoint_i64 : Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, - llvm_ptr_ty, llvm_i32_ty, + llvm_anyptr_ty, llvm_i32_ty, llvm_vararg_ty]>; //===-------------------------- Other Intrinsics --------------------------===// Index: lib/IR/Function.cpp =================================================================== --- lib/IR/Function.cpp +++ lib/IR/Function.cpp @@ -420,6 +420,48 @@ return 0; } +/// Return a mangled type string suitable for use in the +/// name of an intrinsic parameterized by type +static std::string getTypeMunge(Type* Ty) { + std::string Result; + if (PointerType* PTyp = dyn_cast<PointerType>(Ty) ) { + Result += "p" + llvm::utostr(PTyp->getAddressSpace()) + + getTypeMunge(PTyp->getElementType()); + } else if( ArrayType* ATyp = dyn_cast<ArrayType>(Ty) ) { + Result += "a" + llvm::utostr(ATyp->getNumElements()) + + getTypeMunge(ATyp->getElementType()); + } else if( StructType* ST = dyn_cast<StructType>(Ty) ) { + if( !ST->isLiteral() ) { + // TODO: + // a) type renaming? + // b) valid name restrictions? + Result += ST->getName(); + } else { + Result += "s_"; + for(size_t i = 0; i < ST->getNumElements(); i++) { + Result += getTypeMunge(ST->getElementType(i)); + } + Result += "s"; + } + } else if( FunctionType* FT = dyn_cast<FunctionType>(Ty) ) { + Result += "f_" + getTypeMunge(FT->getReturnType()); + for(size_t i = 0; i < FT->getNumParams(); i++) { + Result += getTypeMunge(FT->getParamType(i)); + } + if( FT->isVarArg() ) { + Result += "vararg"; + } + Result += "f"; //ensure distinguishable + } else if (Ty) { + // If you fail an assert inside this, there's most likely + // a missing case in this function.
+ Result += EVT::getEVT(Ty).getEVTString(); + } else { + // ??? Is this possible? + } + return Result; +} + std::string Intrinsic::getName(ID id, ArrayRef<Type*> Tys) { assert(id < num_intrinsics && "Invalid intrinsic ID!"); static const char * const Table[] = { @@ -432,12 +474,7 @@ return Table[id]; std::string Result(Table[id]); for (unsigned i = 0; i < Tys.size(); ++i) { - if (PointerType* PTyp = dyn_cast<PointerType>(Tys[i])) { - Result += ".p" + llvm::utostr(PTyp->getAddressSpace()) + - EVT::getEVT(PTyp->getElementType()).getEVTString(); - } - else if (Tys[i]) - Result += "." + EVT::getEVT(Tys[i]).getEVTString(); + Result += "." + getTypeMunge(Tys[i]); } return Result; } @@ -638,16 +675,23 @@ DecodeIITType(NextElt, IITEntries, T); } +static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos, + ArrayRef<Type*> Tys, LLVMContext &Context); static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos, - ArrayRef<Type*> Tys, LLVMContext &Context) { + ArrayRef<Type*> Tys, LLVMContext &Context, + bool& isVarArg) { using namespace Intrinsic; IITDescriptor D = Infos.front(); Infos = Infos.slice(1); + isVarArg = false; + switch (D.Kind) { case IITDescriptor::Void: return Type::getVoidTy(Context); - case IITDescriptor::VarArg: return Type::getVoidTy(Context); + case IITDescriptor::VarArg: + isVarArg = true; + return NULL; case IITDescriptor::MMX: return Type::getX86_MMXTy(Context); case IITDescriptor::Metadata: return Type::getMetadataTy(Context); case IITDescriptor::Half: return Type::getHalfTy(Context); @@ -694,6 +738,15 @@ llvm_unreachable("unhandled"); } +// This is called from places where a trailing vararg is not legal +static Type *DecodeFixedType(ArrayRef<Intrinsic::IITDescriptor> &Infos, + ArrayRef<Type*> Tys, LLVMContext &Context) { + bool isVarArg = false; + Type* Ty = DecodeFixedType(Infos, Tys, Context, isVarArg); + assert( !isVarArg && "vararg in internal position?"); + return Ty; +} + FunctionType *Intrinsic::getType(LLVMContext &Context, @@ -705,10 +758,20 @@ Type *ResultTy = DecodeFixedType(TableRef, Tys, Context); SmallVector<Type*, 8> ArgTys; - while
(!TableRef.empty()) - ArgTys.push_back(DecodeFixedType(TableRef, Tys, Context)); - - return FunctionType::get(ResultTy, ArgTys, false); + bool isVarArg = false; + while (!TableRef.empty()) { + Type* Ty = DecodeFixedType(TableRef, Tys, Context, isVarArg); + if( Ty ) { + ArgTys.push_back(Ty); + } else if( isVarArg ) { + assert( TableRef.empty() && "var arg must be last" ); + break; + } else { + llvm_unreachable("Type must be non-null unless isVarArg"); + } + } + + return FunctionType::get(ResultTy, ArgTys, isVarArg); } bool Intrinsic::isOverloaded(ID id) { Index: test/CodeGen/X86/stackmap-generic.ll =================================================================== --- /dev/null +++ test/CodeGen/X86/stackmap-generic.ll @@ -0,0 +1,57 @@ +; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=corei7 -disable-fp-elim | FileCheck %s +; These tests exercise the ability to use arbitrary function pointers in the +; patchpoint intrinsics rather than have to bitcast to i8* +; Note: Print verbose stackmaps using -debug-only=stackmaps. 
+ +; There's some minimal checking of output in these tests, but the +; main purpose is to make sure we didn't trip internal assertions +; and that the stackmap section gets generated (which means the +; intrinsics were recognized) + +; CHECK-LABEL: .section __LLVM_STACKMAPS,__llvm_stackmaps +; CHECK-NEXT: __LLVM_StackMaps: +; Header +; CHECK-NEXT: .byte 1 +; CHECK-NEXT: .byte 0 +; CHECK-NEXT: .short 0 +; Num Functions +; CHECK-NEXT: .long 1 +; Num LargeConstants +; CHECK-NEXT: .long 0 +; Num Callsites +; CHECK-NEXT: .long 7 + +%struct1 = type { i32 } + +define void @test() { +entry: + %t_arg_i8 = inttoptr i64 12345678 to void (i8)* + call void (i64, i32, void(i8)*, i32, ...)* @llvm.experimental.patchpoint.void.p0f_isVoidi8f(i64 0, i32 20, void (i8)* %t_arg_i8, i32 1, i8 0) + + %t_arg_i32 = inttoptr i64 12345678 to void (i32)* + call void (i64, i32, void(i32)*, i32, ...)* @llvm.experimental.patchpoint.void.p0f_isVoidi32f(i64 1, i32 20, void (i32)* %t_arg_i32, i32 1, i32 0) + + %t_arg_i8ptr = inttoptr i64 12345678 to void (i8*)* + call void (i64, i32, void(i8*)*, i32, ...)* @llvm.experimental.patchpoint.void.p0f_isVoidp0i8f(i64 2, i32 20, void (i8*)* %t_arg_i8ptr, i32 1, i8* null) + + %t_arg_float = inttoptr i64 12345678 to void (float)* + call void (i64, i32, void(float)*, i32, ...)* @llvm.experimental.patchpoint.void.p0f_isVoidf32f(i64 3, i32 20, void (float)* %t_arg_float, i32 1, float 0.0) + + %t_arg_i32a = inttoptr i64 12345678 to void ([4 x i32])* + call void (i64, i32, void([4 x i32])*, i32, ...)* @llvm.experimental.patchpoint.void.p0f_isVoida4i32f(i64 4, i32 20, void ([4 x i32])* %t_arg_i32a, i32 1, [4 x i32] [i32 0, i32 0, i32 0, i32 0]) + + %t_arg_struct = inttoptr i64 12345678 to void (%struct1 )* + call void (i64, i32, void(%struct1)*, i32, ...)* @llvm.experimental.patchpoint.void.p0f_isVoidstruct1f(i64 5, i32 20, void (%struct1)* %t_arg_struct, i32 1, %struct1 {i32 0} ) + + %t_arg_var = inttoptr i64 12345678 to void (...)* + call void (i64, i32, 
void(...)*, i32, ...)* @llvm.experimental.patchpoint.void.p0f_isVoidvarargf(i64 6, i32 20, void (...)* %t_arg_var, i32 1, i32 0) + + ret void +} +declare void @llvm.experimental.patchpoint.void.p0f_isVoidi8f(i64, i32, void(i8)*, i32, ...) +declare void @llvm.experimental.patchpoint.void.p0f_isVoidp0i8f(i64, i32, void(i8*)*, i32, ...) +declare void @llvm.experimental.patchpoint.void.p0f_isVoidi32f(i64, i32, void(i32)*, i32, ...) +declare void @llvm.experimental.patchpoint.void.p0f_isVoidf32f(i64, i32, void(float)*, i32, ...) +declare void @llvm.experimental.patchpoint.void.p0f_isVoida4i32f(i64, i32, void([4 x i32])*, i32, ...) +declare void @llvm.experimental.patchpoint.void.p0f_isVoidstruct1f(i64, i32, void(%struct1)*, i32, ...) +declare void @llvm.experimental.patchpoint.void.p0f_isVoidvarargf(i64, i32, void(...)*, i32, ...)