Index: llvm/include/llvm/IR/Intrinsics.td
===================================================================
--- llvm/include/llvm/IR/Intrinsics.td
+++ llvm/include/llvm/IR/Intrinsics.td
@@ -389,32 +389,21 @@
   assert isAny, "LLVMAnyType.VT should have isOverloaded";
 }
 
-class LLVMQualPointerType<LLVMType elty, int addrspace>
-  : LLVMType<iPTR>{
-  LLVMType ElTy = elty;
+class LLVMQualPointerType<int addrspace>
+  : LLVMType<iPTR> {
   assert !and(!le(0, addrspace), !le(addrspace, 255)),
          "Address space exceeds 255";
 
-  // D63507: LLVMPointerType<llvm_any_ty>
-  let isAny = elty.isAny;
-
-  let Sig = !listconcat(
+  let Sig =
     !if(addrspace, [
       IIT_ANYPTR.Number,
      addrspace,
     ], [
       IIT_PTR.Number,
-    ]),
-    ElTy.Sig);
+    ]);
 }
 
-class LLVMPointerType<LLVMType elty>
-  : LLVMQualPointerType<elty, 0>;
-
-class LLVMAnyPointerType<LLVMType elty>
-  : LLVMAnyType<iPTRAny> {
-  LLVMType ElTy = elty;
-
+class LLVMAnyPointerType : LLVMAnyType<iPTRAny> {
   assert isAny, "iPTRAny should have isOverloaded";
 }
@@ -506,16 +495,13 @@
 def llvm_f80_ty        : LLVMType<f80>;
 def llvm_f128_ty       : LLVMType<f128>;
 def llvm_ppcf128_ty    : LLVMType<ppcf128>;
-def llvm_ptr_ty        : LLVMPointerType<llvm_i8_ty>;             // i8*
-def llvm_ptrptr_ty     : LLVMPointerType<llvm_ptr_ty>;            // i8**
-def llvm_anyptr_ty     : LLVMAnyPointerType<llvm_i8_ty>;          // (space)i8*
-def llvm_empty_ty      : LLVMType<OtherVT>;                       // { }
-def llvm_descriptor_ty : LLVMPointerType<llvm_empty_ty>;          // { }*
-def llvm_metadata_ty   : LLVMType<MetadataVT>;                    // !{...}
-def llvm_token_ty      : LLVMType<token>;                         // token
+def llvm_ptr_ty        : LLVMQualPointerType<0>;                  // ptr
+def llvm_anyptr_ty     : LLVMAnyPointerType;                      // ptr addrspace(N)
+def llvm_empty_ty      : LLVMType<OtherVT>;                       // { }
+def llvm_metadata_ty   : LLVMType<MetadataVT>;                    // !{...}
+def llvm_token_ty      : LLVMType<token>;                         // token
 
 def llvm_x86mmx_ty     : LLVMType<x86mmx>;
-def llvm_ptrx86mmx_ty  : LLVMPointerType<llvm_x86mmx_ty>;         // <1 x i64>*
 
 def llvm_aarch64_svcount_ty : LLVMType<aarch64svcount>;
@@ -726,12 +712,12 @@
 //===------------------- Garbage Collection Intrinsics --------------------===//
 //
 def int_gcroot  : Intrinsic<[],
-                            [llvm_ptrptr_ty, llvm_ptr_ty]>;
+                            [llvm_ptr_ty, llvm_ptr_ty]>;
 def int_gcread  : Intrinsic<[llvm_ptr_ty],
-                            [llvm_ptr_ty, llvm_ptrptr_ty],
+                            [llvm_ptr_ty, llvm_ptr_ty],
                             [IntrReadMem, IntrArgMemOnly]>;
 def int_gcwrite : Intrinsic<[],
-                            [llvm_ptr_ty, llvm_ptr_ty, llvm_ptrptr_ty],
+                            [llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
                             [IntrArgMemOnly, NoCapture<ArgIndex<1>>,
                              NoCapture<ArgIndex<2>>]>;
@@ -747,19 +733,19 @@
 def int_objc_autoreleaseReturnValue : Intrinsic<[llvm_ptr_ty],
                                                 [llvm_ptr_ty]>;
 def int_objc_copyWeak              : Intrinsic<[],
-                                               [llvm_ptrptr_ty,
-                                                llvm_ptrptr_ty]>;
-def int_objc_destroyWeak           : Intrinsic<[], [llvm_ptrptr_ty]>;
+                                               [llvm_ptr_ty,
+                                                llvm_ptr_ty]>;
+def int_objc_destroyWeak           : Intrinsic<[], [llvm_ptr_ty]>;
 def int_objc_initWeak              : Intrinsic<[llvm_ptr_ty],
-                                               [llvm_ptrptr_ty,
+                                               [llvm_ptr_ty,
                                                 llvm_ptr_ty]>;
 def int_objc_loadWeak              : Intrinsic<[llvm_ptr_ty],
-                                               [llvm_ptrptr_ty]>;
+                                               [llvm_ptr_ty]>;
 def int_objc_loadWeakRetained      : Intrinsic<[llvm_ptr_ty],
-                                               [llvm_ptrptr_ty]>;
+                                               [llvm_ptr_ty]>;
 def int_objc_moveWeak              : Intrinsic<[],
-                                               [llvm_ptrptr_ty,
-                                                llvm_ptrptr_ty]>;
+                                               [llvm_ptr_ty,
+                                                llvm_ptr_ty]>;
 def int_objc_release               : Intrinsic<[], [llvm_ptr_ty]>;
 def int_objc_retain                : Intrinsic<[llvm_ptr_ty],
                                                [llvm_ptr_ty]>;
@@ -772,10 +758,10 @@
 def int_objc_retainBlock           : Intrinsic<[llvm_ptr_ty],
                                                [llvm_ptr_ty]>;
 def int_objc_storeStrong           : Intrinsic<[],
-                                               [llvm_ptrptr_ty,
+                                               [llvm_ptr_ty,
                                                 llvm_ptr_ty]>;
 def int_objc_storeWeak             : Intrinsic<[llvm_ptr_ty],
-                                               [llvm_ptrptr_ty,
+                                               [llvm_ptr_ty,
                                                 llvm_ptr_ty]>;
 def int_objc_clang_arc_use         : Intrinsic<[], [llvm_vararg_ty]>;
@@ -797,23 +783,23 @@
 def int_objc_sync_exit             : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
 def int_objc_arc_annotation_topdown_bbstart
                                    : Intrinsic<[],
-                                               [llvm_ptrptr_ty,
-                                                llvm_ptrptr_ty]>;
+                                               [llvm_ptr_ty,
+                                                llvm_ptr_ty]>;
 def int_objc_arc_annotation_topdown_bbend
                                    : Intrinsic<[],
-                                               [llvm_ptrptr_ty,
-                                                llvm_ptrptr_ty]>;
+                                               [llvm_ptr_ty,
+                                                llvm_ptr_ty]>;
 def int_objc_arc_annotation_bottomup_bbstart
                                    : Intrinsic<[],
-                                               [llvm_ptrptr_ty,
-                                                llvm_ptrptr_ty]>;
+                                               [llvm_ptr_ty,
+                                                llvm_ptr_ty]>;
 def int_objc_arc_annotation_bottomup_bbend
                                    : Intrinsic<[],
-                                               [llvm_ptrptr_ty,
-                                                llvm_ptrptr_ty]>;
+                                               [llvm_ptr_ty,
+                                                llvm_ptr_ty]>;
 
 //===--------------- Swift asynchronous context intrinsics ----------------===//
 // Returns the location of the Swift asynchronous context (usually stored just
 // before the frame pointer), and triggers the creation of a null context if it
 // would otherwise be unneeded.
-def int_swift_async_context_addr : Intrinsic<[llvm_ptrptr_ty], [], []>;
+def int_swift_async_context_addr : Intrinsic<[llvm_ptr_ty], [], []>;
@@ -902,7 +888,7 @@
 
 // Stack Protector Intrinsic - The stackprotector intrinsic writes the stack
 // guard to the correct place on the stack frame.
-def int_stackprotector : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_ptrptr_ty], []>;
+def int_stackprotector : DefaultAttrsIntrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], []>;
 def int_stackguard : DefaultAttrsIntrinsic<[llvm_ptr_ty], [], []>;
 
 // A cover for instrumentation based profiling.
@@ -1383,7 +1369,7 @@
                                      [IntrInaccessibleMemOnly], "llvm.var.annotation">;
 
 def int_ptr_annotation : DefaultAttrsIntrinsic<
-    [LLVMAnyPointerType<llvm_anyint_ty>],
+    [llvm_anyptr_ty],
    [LLVMMatchType<0>, llvm_anyptr_ty, LLVMMatchType<1>, llvm_i32_ty,
     LLVMMatchType<1>],
    [IntrInaccessibleMemOnly], "llvm.ptr.annotation">;
@@ -1528,13 +1514,13 @@
                                 [IntrArgMemOnly, IntrWillReturn,
                                  NoCapture<ArgIndex<1>>,
                                  ImmArg<ArgIndex<0>>]>;
 
-def int_invariant_start : DefaultAttrsIntrinsic<[llvm_descriptor_ty],
+def int_invariant_start : DefaultAttrsIntrinsic<[llvm_ptr_ty],
                                                 [llvm_i64_ty, llvm_anyptr_ty],
                                                 [IntrArgMemOnly, IntrWillReturn,
                                                  NoCapture<ArgIndex<1>>,
                                                  ImmArg<ArgIndex<0>>]>;
 
 def int_invariant_end   : DefaultAttrsIntrinsic<[],
-                                                [llvm_descriptor_ty, llvm_i64_ty,
+                                                [llvm_ptr_ty, llvm_i64_ty,
                                                  llvm_anyptr_ty],
                                                 [IntrArgMemOnly, IntrWillReturn,
                                                  NoCapture<ArgIndex<2>>,
                                                  ImmArg<ArgIndex<1>>]>;
@@ -1760,13 +1746,13 @@
 // Memory Intrinsics
 def int_vp_store : DefaultAttrsIntrinsic<[],
                              [ llvm_anyvector_ty,
-                               LLVMAnyPointerType<LLVMMatchType<0>>,
+                               llvm_anyptr_ty,
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                llvm_i32_ty],
                              [ NoCapture<ArgIndex<1>>, IntrNoSync, IntrWriteMem, IntrArgMemOnly, IntrWillReturn ]>;
 
 def int_vp_load  : DefaultAttrsIntrinsic<[ llvm_anyvector_ty],
-                             [ LLVMAnyPointerType<LLVMMatchType<0>>,
+                             [ llvm_anyptr_ty,
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                llvm_i32_ty],
                              [ NoCapture<ArgIndex<0>>, IntrNoSync, IntrReadMem, IntrWillReturn, IntrArgMemOnly ]>;
@@ -1787,14 +1773,14 @@
 // Experimental strided memory accesses
 def int_experimental_vp_strided_store : DefaultAttrsIntrinsic<[],
                              [ llvm_anyvector_ty,
-                               LLVMAnyPointerToElt<0>,
+                               llvm_anyptr_ty,
                                llvm_anyint_ty, // Stride in bytes
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                llvm_i32_ty],
                              [ NoCapture<ArgIndex<1>>, IntrNoSync, IntrWriteMem, IntrArgMemOnly, IntrWillReturn ]>;
 
 def int_experimental_vp_strided_load  : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                             [ LLVMAnyPointerToElt<0>,
+                             [ llvm_anyptr_ty,
                                llvm_anyint_ty, // Stride in bytes
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                llvm_i32_ty],
@@ -2188,14 +2174,14 @@
 //
 def int_masked_load:
     DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-              [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty,
+              [llvm_anyptr_ty, llvm_i32_ty,
                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<0>],
              [IntrReadMem, IntrArgMemOnly, IntrWillReturn, ImmArg<ArgIndex<1>>,
               NoCapture<ArgIndex<0>>]>;
 
 def int_masked_store:
    DefaultAttrsIntrinsic<[],
-              [llvm_anyvector_ty, LLVMAnyPointerType<LLVMMatchType<0>>,
+              [llvm_anyvector_ty, llvm_anyptr_ty,
                llvm_i32_ty, LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
              [IntrWriteMem, IntrArgMemOnly, IntrWillReturn,
               ImmArg<ArgIndex<2>>, NoCapture<ArgIndex<1>>]>;
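For readers following along: after this change a pointer operand carries only its address
space in the IIT signature, never a pointee type. A minimal sketch of how a target would
now declare a fixed-address-space pointer operand (int_example_ld is hypothetical, not
part of this patch):

  // Hypothetical intrinsic, for illustration only.
  def int_example_ld
      : DefaultAttrsIntrinsic<
            [llvm_i32_ty],
            [LLVMQualPointerType<3>],   // lowers to `ptr addrspace(3)`; no pointee type
            [IntrReadMem, IntrArgMemOnly]>;

Per the new `Sig` above, address space 0 encodes as a bare IIT_PTR and any other space as
IIT_ANYPTR followed by the space number.
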
Index: llvm/include/llvm/IR/IntrinsicsAArch64.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsAArch64.td
+++ llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -557,7 +557,7 @@
 let TargetPrefix = "aarch64" in {  // All intrinsics start with "llvm.aarch64.".
 
 class AdvSIMD_1Vec_Load_Intrinsic
-  : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>],
+  : DefaultAttrsIntrinsic<[llvm_anyvector_ty], [llvm_anyptr_ty],
                           [IntrReadMem, IntrArgMemOnly]>;
 class AdvSIMD_1Vec_Store_Lane_Intrinsic
   : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty],
@@ -565,7 +565,7 @@
 
 class AdvSIMD_2Vec_Load_Intrinsic
   : DefaultAttrsIntrinsic<[LLVMMatchType<0>, llvm_anyvector_ty],
-                          [LLVMAnyPointerType<LLVMMatchType<0>>],
+                          [llvm_anyptr_ty],
                           [IntrReadMem, IntrArgMemOnly]>;
 class AdvSIMD_2Vec_Load_Lane_Intrinsic
   : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>],
@@ -574,7 +574,7 @@
                           [IntrReadMem, IntrArgMemOnly]>;
 class AdvSIMD_2Vec_Store_Intrinsic
   : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
-                               LLVMAnyPointerType<LLVMMatchType<0>>],
+                               llvm_anyptr_ty],
                           [IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;
 class AdvSIMD_2Vec_Store_Lane_Intrinsic
   : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
@@ -583,7 +583,7 @@
 
 class AdvSIMD_3Vec_Load_Intrinsic
   : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty],
-                          [LLVMAnyPointerType<LLVMMatchType<0>>],
+                          [llvm_anyptr_ty],
                           [IntrReadMem, IntrArgMemOnly]>;
 class AdvSIMD_3Vec_Load_Lane_Intrinsic
   : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
@@ -592,7 +592,7 @@
                           [IntrReadMem, IntrArgMemOnly]>;
 class AdvSIMD_3Vec_Store_Intrinsic
   : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
-                               LLVMMatchType<0>, LLVMAnyPointerType<LLVMMatchType<0>>],
+                               LLVMMatchType<0>, llvm_anyptr_ty],
                           [IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
 class AdvSIMD_3Vec_Store_Lane_Intrinsic
   : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty,
@@ -603,7 +603,7 @@
 
 class AdvSIMD_4Vec_Load_Intrinsic
   : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
                            LLVMMatchType<0>, llvm_anyvector_ty],
-                          [LLVMAnyPointerType<LLVMMatchType<0>>],
+                          [llvm_anyptr_ty],
                           [IntrReadMem, IntrArgMemOnly]>;
 class AdvSIMD_4Vec_Load_Lane_Intrinsic
   : DefaultAttrsIntrinsic<[LLVMMatchType<0>, LLVMMatchType<0>,
@@ -615,7 +615,7 @@
 class AdvSIMD_4Vec_Store_Intrinsic
   : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
                                LLVMMatchType<0>, LLVMMatchType<0>,
-                               LLVMAnyPointerType<LLVMMatchType<0>>],
+                               llvm_anyptr_ty],
                           [IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;
 class AdvSIMD_4Vec_Store_Lane_Intrinsic
   : DefaultAttrsIntrinsic<[], [llvm_anyvector_ty, LLVMMatchType<0>,
@@ -1354,8 +1354,7 @@
 
 class SVE2_CONFLICT_DETECT_Intrinsic
   : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                          [LLVMAnyPointerType<llvm_any_ty>,
-                           LLVMMatchType<1>],
+                          [llvm_anyptr_ty, LLVMMatchType<1>],
                           [IntrNoMem]>;
 
 class SVE2_3VectorArg_Indexed_Intrinsic
Index: llvm/include/llvm/IR/IntrinsicsAMDGPU.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsAMDGPU.td
+++ llvm/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -53,7 +53,7 @@
 
 // AS 7 is PARAM_I_ADDRESS, used for kernel arguments
 def int_r600_implicitarg_ptr :
   ClangBuiltin<"__builtin_r600_implicitarg_ptr">,
-  DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 7>], [],
+  DefaultAttrsIntrinsic<[LLVMQualPointerType<7>], [],
                         [IntrNoMem, IntrSpeculatable]>;
 
 def int_r600_rat_store_typed :
@@ -141,22 +141,22 @@
                           <"__builtin_amdgcn_workgroup_id">;
 
 def int_amdgcn_dispatch_ptr :
-  DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+  DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
                         [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;
 
 def int_amdgcn_queue_ptr :
   ClangBuiltin<"__builtin_amdgcn_queue_ptr">,
-  DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+  DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
                         [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;
 
 def int_amdgcn_kernarg_segment_ptr :
   ClangBuiltin<"__builtin_amdgcn_kernarg_segment_ptr">,
-  DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+  DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
                         [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;
 
 def int_amdgcn_implicitarg_ptr :
   ClangBuiltin<"__builtin_amdgcn_implicitarg_ptr">,
-  DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+  DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
                         [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;
 
 def int_amdgcn_groupstaticsize :
@@ -173,7 +173,7 @@
 
 def int_amdgcn_implicit_buffer_ptr :
   ClangBuiltin<"__builtin_amdgcn_implicit_buffer_ptr">,
-  DefaultAttrsIntrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
+  DefaultAttrsIntrinsic<[LLVMQualPointerType<4>], [],
                         [Align<RetIndex, 4>, IntrNoMem, IntrSpeculatable]>;
 
 // Set EXEC to the 64-bit value given.
@@ -463,7 +463,7 @@
 
 class AMDGPULDSIntrin :
   Intrinsic<[llvm_any_ty],
-    [LLVMQualPointerType<LLVMMatchType<0>, 3>,
+    [LLVMQualPointerType<3>,
     LLVMMatchType<0>,
     llvm_i32_ty, // ordering
     llvm_i32_ty, // scope
@@ -477,7 +477,7 @@
    [llvm_i32_ty], // M0 = {hi16:address, lo16:waveID}. Allow passing M0 as a pointer, so that
                   // the bit packing can be optimized at the IR level.
-    [LLVMQualPointerType<llvm_i32_ty, 2>, // IntToPtr(M0)
+    [LLVMQualPointerType<2>, // IntToPtr(M0)
     llvm_i32_ty, // value to add or swap
     llvm_i32_ty, // ordering
     llvm_i32_ty, // scope
@@ -994,13 +994,12 @@
 
 // Data type for buffer resources (V#). Maybe, in the future, we can create a
 // similar one for textures (T#).
-class AMDGPUBufferRsrcTy<LLVMType data_ty = llvm_any_ty>
-  : LLVMQualPointerType<data_ty, 8>;
+def AMDGPUBufferRsrcTy : LLVMQualPointerType<8>;
 
 let TargetPrefix = "amdgcn" in {
 
 def int_amdgcn_make_buffer_rsrc : DefaultAttrsIntrinsic <
-  [AMDGPUBufferRsrcTy<llvm_i8_ty>],
+  [AMDGPUBufferRsrcTy],
  [llvm_anyptr_ty,     // base
   llvm_i16_ty,        // stride (and swizzle control)
   llvm_i32_ty,        // NumRecords / extent
@@ -1073,7 +1072,7 @@
 class AMDGPURawPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
   [data_ty],
-  [AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+  [AMDGPUBufferRsrcTy,                   // rsrc(SGPR)
   llvm_i32_ty,  // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,  // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc,
@@ -1103,7 +1102,7 @@
 class AMDGPUStructPtrBufferLoad<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
   [data_ty],
-  [AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+  [AMDGPUBufferRsrcTy,                   // rsrc(SGPR)
   llvm_i32_ty,  // vindex(VGPR)
   llvm_i32_ty,  // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,  // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1135,7 +1134,7 @@
 class AMDGPURawPtrBufferStore<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
   [],
   [data_ty,                              // vdata(VGPR)
-   AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+   AMDGPUBufferRsrcTy,                   // rsrc(SGPR)
   llvm_i32_ty,  // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,  // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc,
@@ -1167,7 +1166,7 @@
 class AMDGPUStructPtrBufferStore<LLVMType data_ty = llvm_any_ty> : DefaultAttrsIntrinsic <
   [],
   [data_ty,                              // vdata(VGPR)
-   AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+   AMDGPUBufferRsrcTy,                   // rsrc(SGPR)
   llvm_i32_ty,  // vindex(VGPR)
   llvm_i32_ty,  // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,  // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1218,7 +1217,7 @@
 class AMDGPURawPtrBufferAtomic<LLVMType data_ty = llvm_any_ty, bit NoRtn = false> : Intrinsic <
   !if(NoRtn, [], [data_ty]),
   [!if(NoRtn, data_ty, LLVMMatchType<0>), // vdata(VGPR)
-   AMDGPUBufferRsrcTy<LLVMMatchType<0>>,  // rsrc(SGPR)
+   AMDGPUBufferRsrcTy,                    // rsrc(SGPR)
   llvm_i32_ty,  // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,  // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty], // cachepolicy(imm; bit 1 = slc)
@@ -1244,7 +1243,7 @@
   [llvm_anyint_ty],
   [LLVMMatchType<0>, // src(VGPR)
    LLVMMatchType<0>, // cmp(VGPR)
-   AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+   AMDGPUBufferRsrcTy,                   // rsrc(SGPR)
   llvm_i32_ty,  // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,  // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty], // cachepolicy(imm; bit 1 = slc)
@@ -1293,7 +1292,7 @@
 class AMDGPUStructPtrBufferAtomic<LLVMType data_ty = llvm_any_ty, bit NoRtn = false> : Intrinsic <
   !if(NoRtn, [], [data_ty]),
   [!if(NoRtn, data_ty, LLVMMatchType<0>), // vdata(VGPR)
-   AMDGPUBufferRsrcTy<LLVMMatchType<0>>,  // rsrc(SGPR)
+   AMDGPUBufferRsrcTy,                    // rsrc(SGPR)
   llvm_i32_ty,  // vindex(VGPR)
   llvm_i32_ty,  // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,  // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1317,7 +1316,7 @@
   [llvm_anyint_ty],
   [LLVMMatchType<0>, // src(VGPR)
    LLVMMatchType<0>, // cmp(VGPR)
-   AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+   AMDGPUBufferRsrcTy,                   // rsrc(SGPR)
   llvm_i32_ty,  // vindex(VGPR)
   llvm_i32_ty,  // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,  // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1391,7 +1390,7 @@
 def int_amdgcn_raw_ptr_tbuffer_load : DefaultAttrsIntrinsic <
   [llvm_any_ty],   // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
-  [AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+  [AMDGPUBufferRsrcTy,                   // rsrc(SGPR)
   llvm_i32_ty,  // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,  // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty,  // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
@@ -1421,7 +1420,7 @@
 def int_amdgcn_raw_ptr_tbuffer_store : DefaultAttrsIntrinsic <
   [],
   [llvm_any_ty,                          // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
-   AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+   AMDGPUBufferRsrcTy,                   // rsrc(SGPR)
   llvm_i32_ty,  // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,  // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty,  // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
@@ -1450,7 +1449,7 @@
 def int_amdgcn_struct_ptr_tbuffer_load : DefaultAttrsIntrinsic <
   [llvm_any_ty],   // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
-  [AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+  [AMDGPUBufferRsrcTy,                   // rsrc(SGPR)
   llvm_i32_ty,  // vindex(VGPR)
   llvm_i32_ty,  // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,  // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1466,7 +1465,7 @@
 def int_amdgcn_struct_ptr_tbuffer_store : DefaultAttrsIntrinsic <
   [],
   [llvm_any_ty,                          // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
-   AMDGPUBufferRsrcTy<LLVMMatchType<0>>, // rsrc(SGPR)
+   AMDGPUBufferRsrcTy,                   // rsrc(SGPR)
   llvm_i32_ty,  // vindex(VGPR)
   llvm_i32_ty,  // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,  // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1543,7 +1542,7 @@
 class AMDGPURawBufferLoadLDS : Intrinsic <
   [],
   [llvm_v4i32_ty,                        // rsrc(SGPR)
-   LLVMQualPointerType<llvm_i8_ty, 3>,   // LDS base offset
+   LLVMQualPointerType<3>,               // LDS base offset
   llvm_i32_ty,  // Data byte size: 1/2/4
   llvm_i32_ty,  // voffset(VGPR, included in bounds checking and swizzling)
   llvm_i32_ty,  // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1558,8 +1557,8 @@
 class AMDGPURawPtrBufferLoadLDS : Intrinsic <
   [],
-  [AMDGPUBufferRsrcTy<llvm_i8_ty>,       // rsrc(SGPR)
-   LLVMQualPointerType<llvm_i8_ty, 3>,   // LDS base offset
+  [AMDGPUBufferRsrcTy,                   // rsrc(SGPR)
+   LLVMQualPointerType<3>,               // LDS base offset
   llvm_i32_ty,  // Data byte size: 1/2/4
   llvm_i32_ty,  // voffset(VGPR, included in bounds checking and swizzling)
   llvm_i32_ty,  // soffset(SGPR/imm, excluded from bounds checking and swizzling)
@@ -1578,7 +1577,7 @@
 class AMDGPUStructBufferLoadLDS : Intrinsic <
   [],
   [llvm_v4i32_ty,                        // rsrc(SGPR)
-   LLVMQualPointerType<llvm_i8_ty, 3>,   // LDS base offset
+   LLVMQualPointerType<3>,               // LDS base offset
   llvm_i32_ty,  // Data byte size: 1/2/4
   llvm_i32_ty,  // vindex(VGPR)
   llvm_i32_ty,  // voffset(VGPR, included in bounds checking and swizzling)
@@ -1594,8 +1593,8 @@
 class AMDGPUStructPtrBufferLoadLDS : Intrinsic <
   [],
-  [AMDGPUBufferRsrcTy<llvm_i8_ty>,       // rsrc(SGPR)
-   LLVMQualPointerType<llvm_i8_ty, 3>,   // LDS base offset
+  [AMDGPUBufferRsrcTy,                   // rsrc(SGPR)
+   LLVMQualPointerType<3>,               // LDS base offset
   llvm_i32_ty,  // Data byte size: 1/2/4
   llvm_i32_ty,  // vindex(VGPR)
   llvm_i32_ty,  // voffset(VGPR, included in bounds checking and swizzling)
@@ -2208,8 +2207,8 @@
 class AMDGPUGlobalLoadLDS : Intrinsic <
   [],
-  [LLVMQualPointerType<llvm_i8_ty, 1>,   // Base global pointer to load from
-   LLVMQualPointerType<llvm_i8_ty, 3>,   // LDS base pointer to store to
+  [LLVMQualPointerType<1>,               // Base global pointer to load from
+   LLVMQualPointerType<3>,               // LDS base pointer to store to
   llvm_i32_ty,  // Data byte size: 1/2/4
   llvm_i32_ty,  // imm offset (applied to both global and LDS address)
   llvm_i32_ty], // auxiliary data (imm, cachepolicy (bit 0 = glc/sc0,
@@ -2624,7 +2623,7 @@
 def int_amdgcn_flat_atomic_fadd_v2bf16 : AMDGPUGlobalAtomicRtn<llvm_v2i16_ty>;
 def int_amdgcn_ds_fadd_v2bf16 : DefaultAttrsIntrinsic<
    [llvm_v2i16_ty],
-    [LLVMQualPointerType<llvm_v2i16_ty, 3>, llvm_v2i16_ty],
+    [LLVMQualPointerType<3>, llvm_v2i16_ty],
    [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>,
    ClangBuiltin<"__builtin_amdgcn_ds_atomic_fadd_v2bf16">;
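A note for reviewers: with AMDGPUBufferRsrcTy reduced from a parameterized class to a
plain def for `ptr addrspace(8)`, buffer intrinsics name it directly and no longer thread
a pointee type through their operand lists. A minimal sketch of a post-change declaration
(int_example_rsrc_peek is hypothetical, not part of this patch):

  // Hypothetical def showing the new, unparameterized resource type.
  def int_example_rsrc_peek : DefaultAttrsIntrinsic <
      [llvm_i32_ty],
      [AMDGPUBufferRsrcTy,  // rsrc(SGPR): always `ptr addrspace(8)` now
       llvm_i32_ty],        // offset
      [IntrReadMem]>;
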
Index: llvm/include/llvm/IR/IntrinsicsARM.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsARM.td
+++ llvm/include/llvm/IR/IntrinsicsARM.td
@@ -702,13 +702,13 @@
 
 def int_arm_neon_vld1x2 : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>],
-    [LLVMAnyPointerType<LLVMMatchType<0>>], [IntrReadMem, IntrArgMemOnly]>;
+    [llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>;
 def int_arm_neon_vld1x3 : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>],
-    [LLVMAnyPointerType<LLVMMatchType<0>>], [IntrReadMem, IntrArgMemOnly]>;
+    [llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>;
 def int_arm_neon_vld1x4 : DefaultAttrsIntrinsic<
    [llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
-    [LLVMAnyPointerType<LLVMMatchType<0>>], [IntrReadMem, IntrArgMemOnly]>;
+    [llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>;
 
 // Vector load N-element structure to one lane.
 // Source operands are: the address, the N input vectors (since only one
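One semantic wrinkle worth calling out in the vld1x2..vld1x4 changes:
LLVMAnyPointerType<LLVMMatchType<0>> tied the pointer's pointee to overloaded type 0,
whereas llvm_anyptr_ty is an independent overload, so the pointer now contributes its own
mangling position (the address space, e.g. a `.p0` suffix alongside the vector type).
Sketch under that assumption, with a hypothetical declaration:

  // Overload slots: <0> = result vector, <1> = pointer (address space only,
  // under opaque pointers), giving names like llvm.example.vldx.v4i32.p0.
  def int_example_vldx : DefaultAttrsIntrinsic<
      [llvm_anyvector_ty], [llvm_anyptr_ty],
      [IntrReadMem, IntrArgMemOnly]>;
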
Index: llvm/include/llvm/IR/IntrinsicsHexagon.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsHexagon.td
+++ llvm/include/llvm/IR/IntrinsicsHexagon.td
@@ -125,30 +125,27 @@
 def int_hexagon_prefetch :
   Hexagon_Intrinsic<"HEXAGON_prefetch", [], [llvm_ptr_ty], []>;
 
-def llvm_ptr32_ty : LLVMPointerType<llvm_i32_ty>;
-def llvm_ptr64_ty : LLVMPointerType<llvm_i64_ty>;
-
 // Mark locked loads as read/write to prevent any accidental reordering.
 // These don't use Hexagon_Intrinsic, because they are not nosync, and as such
 // cannot use default attributes.
 let TargetPrefix = "hexagon" in {
   def int_hexagon_L2_loadw_locked :
     ClangBuiltin<"__builtin_HEXAGON_L2_loadw_locked">,
-    Intrinsic<[llvm_i32_ty], [llvm_ptr32_ty],
+    Intrinsic<[llvm_i32_ty], [llvm_ptr_ty],
               [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
   def int_hexagon_L4_loadd_locked :
     ClangBuiltin<"__builtin__HEXAGON_L4_loadd_locked">,
-    Intrinsic<[llvm_i64_ty], [llvm_ptr64_ty],
+    Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
              [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
 
   def int_hexagon_S2_storew_locked :
     ClangBuiltin<"__builtin_HEXAGON_S2_storew_locked">,
     Intrinsic<[llvm_i32_ty],
-              [llvm_ptr32_ty, llvm_i32_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
+              [llvm_ptr_ty, llvm_i32_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
   def int_hexagon_S4_stored_locked :
     ClangBuiltin<"__builtin_HEXAGON_S4_stored_locked">,
     Intrinsic<[llvm_i32_ty],
-              [llvm_ptr64_ty, llvm_i64_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
+              [llvm_ptr_ty, llvm_i64_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
 }
 
 def int_hexagon_vmemcpy : Hexagon_Intrinsic<"hexagon_vmemcpy",
@@ -266,7 +263,7 @@
 class Hexagon_pred_vload_imm<LLVMType ValTy>
   : Hexagon_NonGCC_Intrinsic<
      [ValTy],
-      [llvm_i1_ty, LLVMPointerType<ValTy>, llvm_i32_ty],
+      [llvm_i1_ty, llvm_ptr_ty, llvm_i32_ty],
      [IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
       ImmArg<ArgIndex<2>>]>;
@@ -284,8 +281,8 @@
 class Hexagom_pred_vload_upd<LLVMType ValTy, bit TakesImm>
   : Hexagon_NonGCC_Intrinsic<
-      [ValTy, LLVMPointerType<ValTy>],
-      [llvm_i1_ty, LLVMPointerType<ValTy>, llvm_i32_ty],
+      [ValTy, llvm_ptr_ty],
+      [llvm_i1_ty, llvm_ptr_ty, llvm_i32_ty],
      !if(TakesImm,
          [IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
           ImmArg<ArgIndex<2>>],
@@ -318,7 +315,7 @@
 class Hexagon_pred_vstore_imm<LLVMType ValTy>
   : Hexagon_NonGCC_Intrinsic<
      [],
-      [llvm_i1_ty, LLVMPointerType<ValTy>, llvm_i32_ty, ValTy],
+      [llvm_i1_ty, llvm_ptr_ty, llvm_i32_ty, ValTy],
      [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
       ImmArg<ArgIndex<2>>]>;
@@ -340,8 +337,8 @@
 class Hexagon_pred_vstore_upd<LLVMType ValTy, bit TakesImm>
   : Hexagon_NonGCC_Intrinsic<
-      [LLVMPointerType<ValTy>],
-      [llvm_i1_ty, LLVMPointerType<ValTy>, llvm_i32_ty, ValTy],
+      [llvm_ptr_ty],
+      [llvm_i1_ty, llvm_ptr_ty, llvm_i32_ty, ValTy],
      !if(TakesImm,
          [IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
           ImmArg<ArgIndex<2>>],
Index: llvm/include/llvm/IR/IntrinsicsNVVM.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsNVVM.td
+++ llvm/include/llvm/IR/IntrinsicsNVVM.td
@@ -31,11 +31,8 @@
 // * llvm.nvvm.max.ull            --> ibid.
 // * llvm.nvvm.h2f                --> llvm.convert.to.fp16.f32
 
-def llvm_global_i8ptr_ty : LLVMQualPointerType<llvm_i8_ty, 1>;    // (global)i8*
-def llvm_shared_i8ptr_ty : LLVMQualPointerType<llvm_i8_ty, 3>;    // (shared)i8*
-def llvm_i64ptr_ty : LLVMPointerType<llvm_i64_ty>;                // i64*
-def llvm_any_i64ptr_ty : LLVMAnyPointerType<llvm_i64_ty>;         // (space)i64*
-def llvm_shared_i64ptr_ty : LLVMQualPointerType<llvm_i64_ty, 3>;  // (shared)i64*
+def llvm_global_ptr_ty : LLVMQualPointerType<1>;  // (global)ptr
+def llvm_shared_ptr_ty : LLVMQualPointerType<3>;  // (shared)ptr
 
 //
 // MISC
@@ -1293,19 +1290,19 @@
 // Atomics not available as llvm intrinsics.
 def int_nvvm_atomic_load_inc_32 : Intrinsic<[llvm_i32_ty],
-        [LLVMAnyPointerType<llvm_i32_ty>, llvm_i32_ty],
+        [llvm_anyptr_ty, llvm_i32_ty],
        [IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>]>;
 def int_nvvm_atomic_load_dec_32 : Intrinsic<[llvm_i32_ty],
-        [LLVMAnyPointerType<llvm_i32_ty>, llvm_i32_ty],
+        [llvm_anyptr_ty, llvm_i32_ty],
        [IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>]>;
 
 class SCOPED_ATOMIC2_impl<LLVMType elty>
       : Intrinsic<[elty],
-          [LLVMAnyPointerType<LLVMMatchType<0>>, LLVMMatchType<0>],
+          [llvm_anyptr_ty, LLVMMatchType<0>],
          [IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>]>;
 class SCOPED_ATOMIC3_impl<LLVMType elty>
       : Intrinsic<[elty],
-          [LLVMAnyPointerType<LLVMMatchType<0>>, LLVMMatchType<0>,
+          [llvm_anyptr_ty, LLVMMatchType<0>,
           LLVMMatchType<0>],
          [IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>]>;
@@ -1388,23 +1385,23 @@
 // Async Copy
 def int_nvvm_cp_async_mbarrier_arrive :
     ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive">,
-    Intrinsic<[],[llvm_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[],[llvm_ptr_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_cp_async_mbarrier_arrive_shared :
     ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive_shared">,
-    Intrinsic<[],[llvm_shared_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[],[llvm_shared_ptr_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_cp_async_mbarrier_arrive_noinc :
     ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive_noinc">,
-    Intrinsic<[],[llvm_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[],[llvm_ptr_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_cp_async_mbarrier_arrive_noinc_shared :
     ClangBuiltin<"__nvvm_cp_async_mbarrier_arrive_noinc_shared">,
-    Intrinsic<[],[llvm_shared_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[],[llvm_shared_ptr_ty],[IntrConvergent, IntrNoCallback]>;
 
 multiclass CP_ASYNC_SHARED_GLOBAL<string n, string cc> {
-  def NAME: Intrinsic<[],[llvm_shared_i8ptr_ty, llvm_global_i8ptr_ty],
+  def NAME: Intrinsic<[],[llvm_shared_ptr_ty, llvm_global_ptr_ty],
                      [IntrArgMemOnly, IntrNoCallback, NoAlias<ArgIndex<0>>, NoAlias<ArgIndex<1>>,
                       WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>],
                      "llvm.nvvm.cp.async." # cc # ".shared.global." # n>;
-  def _s: Intrinsic<[],[llvm_shared_i8ptr_ty, llvm_global_i8ptr_ty, llvm_i32_ty],
+  def _s: Intrinsic<[],[llvm_shared_ptr_ty, llvm_global_ptr_ty, llvm_i32_ty],
                    [IntrArgMemOnly, IntrNoCallback, NoAlias<ArgIndex<0>>, NoAlias<ArgIndex<1>>,
                     WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>],
                    "llvm.nvvm.cp.async." # cc # ".shared.global." # n # ".s">;
@@ -1429,54 +1426,54 @@
 // mbarrier
 def int_nvvm_mbarrier_init : ClangBuiltin<"__nvvm_mbarrier_init">,
-    Intrinsic<[],[llvm_i64ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[],[llvm_ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_mbarrier_init_shared :
     ClangBuiltin<"__nvvm_mbarrier_init_shared">,
-    Intrinsic<[],[llvm_shared_i64ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[],[llvm_shared_ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
 
 def int_nvvm_mbarrier_inval : ClangBuiltin<"__nvvm_mbarrier_inval">,
-    Intrinsic<[],[llvm_i64ptr_ty],
+    Intrinsic<[],[llvm_ptr_ty],
    [IntrConvergent, IntrWriteMem, IntrArgMemOnly, IntrNoCallback,
     WriteOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>]>;
 def int_nvvm_mbarrier_inval_shared :
     ClangBuiltin<"__nvvm_mbarrier_inval_shared">,
-    Intrinsic<[],[llvm_shared_i64ptr_ty],
+    Intrinsic<[],[llvm_shared_ptr_ty],
    [IntrConvergent, IntrWriteMem, IntrArgMemOnly, IntrNoCallback,
     WriteOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>]>;
 
 def int_nvvm_mbarrier_arrive : ClangBuiltin<"__nvvm_mbarrier_arrive">,
-    Intrinsic<[llvm_i64_ty],[llvm_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[llvm_i64_ty],[llvm_ptr_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_mbarrier_arrive_shared :
     ClangBuiltin<"__nvvm_mbarrier_arrive_shared">,
-    Intrinsic<[llvm_i64_ty],[llvm_shared_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[llvm_i64_ty],[llvm_shared_ptr_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_mbarrier_arrive_noComplete :
     ClangBuiltin<"__nvvm_mbarrier_arrive_noComplete">,
-    Intrinsic<[llvm_i64_ty],[llvm_i64ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[llvm_i64_ty],[llvm_ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_mbarrier_arrive_noComplete_shared :
     ClangBuiltin<"__nvvm_mbarrier_arrive_noComplete_shared">,
-    Intrinsic<[llvm_i64_ty],[llvm_shared_i64ptr_ty,
+    Intrinsic<[llvm_i64_ty],[llvm_shared_ptr_ty,
    llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
 
 def int_nvvm_mbarrier_arrive_drop :
     ClangBuiltin<"__nvvm_mbarrier_arrive_drop">,
-    Intrinsic<[llvm_i64_ty],[llvm_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[llvm_i64_ty],[llvm_ptr_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_mbarrier_arrive_drop_shared :
     ClangBuiltin<"__nvvm_mbarrier_arrive_drop_shared">,
-    Intrinsic<[llvm_i64_ty],[llvm_shared_i64ptr_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[llvm_i64_ty],[llvm_shared_ptr_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_mbarrier_arrive_drop_noComplete :
     ClangBuiltin<"__nvvm_mbarrier_arrive_drop_noComplete">,
-    Intrinsic<[llvm_i64_ty],[llvm_i64ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[llvm_i64_ty],[llvm_ptr_ty, llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_mbarrier_arrive_drop_noComplete_shared :
     ClangBuiltin<"__nvvm_mbarrier_arrive_drop_noComplete_shared">,
-    Intrinsic<[llvm_i64_ty],[llvm_shared_i64ptr_ty,
+    Intrinsic<[llvm_i64_ty],[llvm_shared_ptr_ty,
    llvm_i32_ty],[IntrConvergent, IntrNoCallback]>;
 
 def int_nvvm_mbarrier_test_wait :
     ClangBuiltin<"__nvvm_mbarrier_test_wait">,
-    Intrinsic<[llvm_i1_ty],[llvm_i64ptr_ty, llvm_i64_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[llvm_i1_ty],[llvm_ptr_ty, llvm_i64_ty],[IntrConvergent, IntrNoCallback]>;
 def int_nvvm_mbarrier_test_wait_shared :
     ClangBuiltin<"__nvvm_mbarrier_test_wait_shared">,
-    Intrinsic<[llvm_i1_ty],[llvm_shared_i64ptr_ty, llvm_i64_ty],[IntrConvergent, IntrNoCallback]>;
+    Intrinsic<[llvm_i1_ty],[llvm_shared_ptr_ty, llvm_i64_ty],[IntrConvergent, IntrNoCallback]>;
 
 def int_nvvm_mbarrier_pending_count :
     ClangBuiltin<"__nvvm_mbarrier_pending_count">,
@@ -1485,30 +1482,30 @@
 // Generated within nvvm. Use for ldu on sm_20 or later. Second arg is the
 // pointer's alignment.
 def int_nvvm_ldu_global_i : Intrinsic<[llvm_anyint_ty],
-  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [llvm_anyptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
  "llvm.nvvm.ldu.global.i">;
 def int_nvvm_ldu_global_f : Intrinsic<[llvm_anyfloat_ty],
-  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [llvm_anyptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
  "llvm.nvvm.ldu.global.f">;
 def int_nvvm_ldu_global_p : Intrinsic<[llvm_anyptr_ty],
-  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [llvm_anyptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
  "llvm.nvvm.ldu.global.p">;
 
 // Generated within nvvm. Use for ldg on sm_35 or later. Second arg is the
 // pointer's alignment.
 def int_nvvm_ldg_global_i : Intrinsic<[llvm_anyint_ty],
-  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [llvm_anyptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
  "llvm.nvvm.ldg.global.i">;
 def int_nvvm_ldg_global_f : Intrinsic<[llvm_anyfloat_ty],
-  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [llvm_anyptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
  "llvm.nvvm.ldg.global.f">;
 def int_nvvm_ldg_global_p : Intrinsic<[llvm_anyptr_ty],
-  [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty],
+  [llvm_anyptr_ty, llvm_i32_ty],
  [IntrReadMem, IntrArgMemOnly, IntrNoCallback, NoCapture<ArgIndex<0>>],
  "llvm.nvvm.ldg.global.p">;
@@ -1571,7 +1568,7 @@
 
 // For getting the handle from a texture or surface variable
 def int_nvvm_texsurf_handle
-  : Intrinsic<[llvm_i64_ty], [llvm_metadata_ty, llvm_any_i64ptr_ty],
+  : Intrinsic<[llvm_i64_ty], [llvm_metadata_ty, llvm_anyptr_ty],
              [IntrNoMem], "llvm.nvvm.texsurf.handle">;
 def int_nvvm_texsurf_handle_internal
   : Intrinsic<[llvm_i64_ty], [llvm_anyptr_ty],
@@ -4697,7 +4694,7 @@
              [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
              "llvm.nvvm.mapa">;
 def int_nvvm_mapa_shared_cluster
-  : DefaultAttrsIntrinsic<[llvm_shared_i8ptr_ty], [llvm_shared_i8ptr_ty, llvm_i32_ty],
+  : DefaultAttrsIntrinsic<[llvm_shared_ptr_ty], [llvm_shared_ptr_ty, llvm_i32_ty],
                          [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
                          "llvm.nvvm.mapa.shared.cluster">;
 def int_nvvm_getctarank
@@ -4705,7 +4702,7 @@
              [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
              "llvm.nvvm.getctarank">;
 def int_nvvm_getctarank_shared_cluster
-  : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_shared_i8ptr_ty],
+  : DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_shared_ptr_ty],
                          [IntrNoMem, IntrSpeculatable, NoCapture<ArgIndex<0>>],
                          "llvm.nvvm.getctarank.shared.cluster">;
 def int_nvvm_is_explicit_cluster
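The renamed NVVM helpers above now record only an address space, so a single def serves
every pointee where there used to be separate i8 and i64 pointer types. A minimal sketch
of a def written against them (int_example_cp_peek is hypothetical, not part of this
patch):

  // Hypothetical intrinsic using the new address-space-only NVVM pointer defs.
  def int_example_cp_peek : Intrinsic<
      [], [llvm_shared_ptr_ty, llvm_global_ptr_ty],
      [IntrArgMemOnly, WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>]>;
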
Index: llvm/include/llvm/IR/IntrinsicsRISCV.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -146,8 +146,7 @@
   // Input: (pointer, vl)
   class RISCVUSMLoad
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                                [LLVMPointerType<LLVMMatchType<0>>,
-                                 llvm_anyint_ty],
+                                [llvm_ptr_ty, llvm_anyint_ty],
                                 [NoCapture<ArgIndex<0>>, IntrReadMem]>,
           RISCVVIntrinsic {
     let VLOperand = 1;
   }
@@ -155,9 +154,7 @@
   // Input: (passthru, pointer, vl)
   class RISCVUSLoad
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                                [LLVMMatchType<0>,
-                                 LLVMPointerType<LLVMMatchType<0>>,
-                                 llvm_anyint_ty],
+                                [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty],
                                 [NoCapture<ArgIndex<1>>, IntrReadMem]>,
           RISCVVIntrinsic {
     let VLOperand = 2;
   }
@@ -168,8 +165,7 @@
   // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
   class RISCVUSLoadFF
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
-                                [LLVMMatchType<0>,
-                                 LLVMPointerType<LLVMMatchType<0>>, LLVMMatchType<1>],
+                                [LLVMMatchType<0>, llvm_ptr_ty, LLVMMatchType<1>],
                                 [NoCapture<ArgIndex<1>>]>,
           RISCVVIntrinsic {
     let VLOperand = 2;
@@ -178,8 +174,7 @@
   // Input: (maskedoff, pointer, mask, vl, policy)
   class RISCVUSLoadMasked
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
-                                [LLVMMatchType<0>,
-                                 LLVMPointerType<LLVMMatchType<0>>,
+                                [LLVMMatchType<0>, llvm_ptr_ty,
                                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                  llvm_anyint_ty, LLVMMatchType<1>],
                                 [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>, IntrReadMem]>,
@@ -193,8 +188,7 @@
   // VL as a side effect. IntrReadMem, IntrHasSideEffects does not work.
   class RISCVUSLoadFFMasked
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty, llvm_anyint_ty],
-                                [LLVMMatchType<0>,
-                                 LLVMPointerType<LLVMMatchType<0>>,
+                                [LLVMMatchType<0>, llvm_ptr_ty,
                                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                  LLVMMatchType<1>, LLVMMatchType<1>],
                                 [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
@@ -204,8 +198,7 @@
   // Input: (passthru, pointer, stride, vl)
   class RISCVSLoad
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                                [LLVMMatchType<0>,
-                                 LLVMPointerType<LLVMMatchType<0>>,
+                                [LLVMMatchType<0>, llvm_ptr_ty,
                                  llvm_anyint_ty, LLVMMatchType<1>],
                                 [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
     let VLOperand = 3;
@@ -214,8 +207,7 @@
   // Input: (maskedoff, pointer, stride, mask, vl, policy)
   class RISCVSLoadMasked
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
-                                [LLVMMatchType<0>,
-                                 LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
+                                [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyint_ty,
                                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>,
                                  LLVMMatchType<1>],
                                 [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
@@ -226,8 +218,7 @@
   // Input: (passthru, pointer, index, vl)
   class RISCVILoad
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                                [LLVMMatchType<0>,
-                                 LLVMPointerType<LLVMMatchType<0>>,
+                                [LLVMMatchType<0>, llvm_ptr_ty,
                                  llvm_anyvector_ty, llvm_anyint_ty],
                                 [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic {
     let VLOperand = 3;
@@ -236,8 +227,7 @@
   // Input: (maskedoff, pointer, index, mask, vl, policy)
   class RISCVILoadMasked
         : DefaultAttrsIntrinsic<[llvm_anyvector_ty ],
-                                [LLVMMatchType<0>,
-                                 LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
+                                [LLVMMatchType<0>, llvm_ptr_ty, llvm_anyvector_ty,
                                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                  llvm_anyint_ty, LLVMMatchType<2>],
                                 [NoCapture<ArgIndex<1>>, ImmArg<ArgIndex<5>>, IntrReadMem]>,
@@ -248,9 +238,7 @@
   // Input: (vector_in, pointer, vl)
   class RISCVUSStore
         : DefaultAttrsIntrinsic<[],
-                                [llvm_anyvector_ty,
-                                 LLVMPointerType<LLVMMatchType<0>>,
-                                 llvm_anyint_ty],
+                                [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty],
                                 [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
     let VLOperand = 2;
   }
@@ -258,8 +246,7 @@
   // Input: (vector_in, pointer, mask, vl)
   class RISCVUSStoreMasked
         : DefaultAttrsIntrinsic<[],
-                                [llvm_anyvector_ty,
-                                 LLVMPointerType<LLVMMatchType<0>>,
+                                [llvm_anyvector_ty, llvm_ptr_ty,
                                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                  llvm_anyint_ty],
                                 [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
     let VLOperand = 3;
@@ -269,8 +256,7 @@
   // Input: (vector_in, pointer, stride, vl)
   class RISCVSStore
         : DefaultAttrsIntrinsic<[],
-                                [llvm_anyvector_ty,
-                                 LLVMPointerType<LLVMMatchType<0>>,
+                                [llvm_anyvector_ty, llvm_ptr_ty,
                                  llvm_anyint_ty, LLVMMatchType<1>],
                                 [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
     let VLOperand = 3;
@@ -279,8 +265,7 @@
   // Input: (vector_in, pointer, stride, mask, vl)
   class RISCVSStoreMasked
         : DefaultAttrsIntrinsic<[],
-                                [llvm_anyvector_ty,
-                                 LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
+                                [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyint_ty,
                                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                  LLVMMatchType<1>],
                                 [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
     let VLOperand = 4;
@@ -289,8 +274,7 @@
   // Input: (vector_in, pointer, index, vl)
   class RISCVIStore
         : DefaultAttrsIntrinsic<[],
-                                [llvm_anyvector_ty,
-                                 LLVMPointerType<LLVMMatchType<0>>,
+                                [llvm_anyvector_ty, llvm_ptr_ty,
                                  llvm_anyint_ty, llvm_anyint_ty],
                                 [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
     let VLOperand = 3;
@@ -299,8 +283,7 @@
   // Input: (vector_in, pointer, index, mask, vl)
   class RISCVIStoreMasked
         : DefaultAttrsIntrinsic<[],
-                                [llvm_anyvector_ty,
-                                 LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
+                                [llvm_anyvector_ty, llvm_ptr_ty, llvm_anyvector_ty,
                                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                  llvm_anyint_ty],
                                 [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic {
     let VLOperand = 4;
Index: llvm/include/llvm/IR/IntrinsicsSystemZ.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsSystemZ.td
+++ llvm/include/llvm/IR/IntrinsicsSystemZ.td
@@ -222,7 +222,7 @@
   def int_s390_etnd : ClangBuiltin<"__builtin_tx_nesting_depth">,
                       Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
 
-  def int_s390_ntstg : Intrinsic<[], [llvm_i64_ty, llvm_ptr64_ty],
+  def int_s390_ntstg : Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty],
                                  [IntrArgMemOnly, IntrWriteMem]>;
 
   def int_s390_ppa_txassist : ClangBuiltin<"__builtin_tx_assist">,
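As the RISC-V hunks show, unit-stride and indexed accesses now take a plain llvm_ptr_ty,
so the pointer drops out of overload resolution entirely; only the vector and VL integer
types remain overloaded. A sketch mirroring RISCVUSMLoad under that reading (the def name
is hypothetical):

  // Hypothetical def: pointer is fixed to `ptr` (address space 0), so only the
  // result vector and the VL type contribute to the mangled intrinsic name.
  def int_example_usload
      : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
                              [llvm_ptr_ty, llvm_anyint_ty],
                              [NoCapture<ArgIndex<0>>, IntrReadMem]>;
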
Index: llvm/include/llvm/IR/IntrinsicsWebAssembly.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsWebAssembly.td
+++ llvm/include/llvm/IR/IntrinsicsWebAssembly.td
@@ -12,7 +12,7 @@
 //===----------------------------------------------------------------------===//
 
 // Type definition for a table in an intrinsic
-def llvm_table_ty : LLVMQualPointerType<llvm_i8_ty, 1>;
+def llvm_table_ty : LLVMQualPointerType<1>;
 
 let TargetPrefix = "wasm" in {  // All intrinsics start with "llvm.wasm.".
@@ -144,18 +144,18 @@
 // These don't use default attributes, because they are not nosync.
 def int_wasm_memory_atomic_wait32 :
   Intrinsic<[llvm_i32_ty],
-            [LLVMPointerType<llvm_i32_ty>, llvm_i32_ty, llvm_i64_ty],
+            [llvm_ptr_ty, llvm_i32_ty, llvm_i64_ty],
            [IntrInaccessibleMemOrArgMemOnly, ReadOnly<ArgIndex<0>>,
             NoCapture<ArgIndex<0>>, IntrHasSideEffects],
            "", [SDNPMemOperand]>;
 def int_wasm_memory_atomic_wait64 :
   Intrinsic<[llvm_i32_ty],
-            [LLVMPointerType<llvm_i64_ty>, llvm_i64_ty, llvm_i64_ty],
+            [llvm_ptr_ty, llvm_i64_ty, llvm_i64_ty],
            [IntrInaccessibleMemOrArgMemOnly, ReadOnly<ArgIndex<0>>,
             NoCapture<ArgIndex<0>>, IntrHasSideEffects],
            "", [SDNPMemOperand]>;
 def int_wasm_memory_atomic_notify:
-  Intrinsic<[llvm_i32_ty], [LLVMPointerType<llvm_i32_ty>, llvm_i32_ty],
+  Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
            [IntrInaccessibleMemOnly, NoCapture<ArgIndex<0>>,
             IntrHasSideEffects],
            "", [SDNPMemOperand]>;
Index: llvm/include/llvm/IR/IntrinsicsX86.td
===================================================================
--- llvm/include/llvm/IR/IntrinsicsX86.td
+++ llvm/include/llvm/IR/IntrinsicsX86.td
@@ -2558,7 +2558,7 @@
           DefaultAttrsIntrinsic<[llvm_i32_ty], [llvm_x86mmx_ty], [IntrNoMem]>;
 
   def int_x86_mmx_movnt_dq : ClangBuiltin<"__builtin_ia32_movntq">,
-              Intrinsic<[], [llvm_ptrx86mmx_ty, llvm_x86mmx_ty], []>;
+              Intrinsic<[], [llvm_ptr_ty, llvm_x86mmx_ty], []>;
 
   def int_x86_mmx_palignr_b : ClangBuiltin<"__builtin_ia32_palignr">,
               DefaultAttrsIntrinsic<[llvm_x86mmx_ty],
Index: llvm/lib/IR/Function.cpp
===================================================================
--- llvm/lib/IR/Function.cpp
+++ llvm/lib/IR/Function.cpp
@@ -1167,22 +1167,17 @@
     return;
   case IIT_EXTERNREF:
     OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 10));
-    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Struct, 0));
     return;
   case IIT_FUNCREF:
     OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 20));
-    OutputTable.push_back(IITDescriptor::get(IITDescriptor::Integer, 8));
     return;
   case IIT_PTR:
     OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer, 0));
-    DecodeIITType(NextElt, Infos, Info, OutputTable);
     return;
-  case IIT_ANYPTR: { // [ANYPTR addrspace, subtype]
+  case IIT_ANYPTR: // [ANYPTR addrspace]
     OutputTable.push_back(IITDescriptor::get(IITDescriptor::Pointer,
                                              Infos[NextElt++]));
-    DecodeIITType(NextElt, Infos, Info, OutputTable);
     return;
-  }
   case IIT_ARG: {
     unsigned ArgInfo = (NextElt == Infos.size() ? 0 : Infos[NextElt++]);
     OutputTable.push_back(IITDescriptor::get(IITDescriptor::Argument, ArgInfo));
@@ -1352,8 +1347,7 @@
     return VectorType::get(DecodeFixedType(Infos, Tys, Context),
                            D.Vector_Width);
   case IITDescriptor::Pointer:
-    return PointerType::get(DecodeFixedType(Infos, Tys, Context),
-                            D.Pointer_AddressSpace);
+    return PointerType::get(Context, D.Pointer_AddressSpace);
   case IITDescriptor::Struct: {
     SmallVector<Type *, 8> Elts;
     for (unsigned i = 0, e = D.Struct_NumElements; i != e; ++i)
@@ -1530,33 +1524,7 @@
     }
     case IITDescriptor::Pointer: {
       PointerType *PT = dyn_cast<PointerType>(Ty);
-      if (!PT || PT->getAddressSpace() != D.Pointer_AddressSpace)
-        return true;
-      if (!PT->isOpaque()) {
-        /* Manually consume a pointer to empty struct descriptor, which is
-         * used for externref. We don't want to enforce that the struct is
-         * anonymous in this case. (This renders externref intrinsics
-         * non-unique, but this will go away with opaque pointers anyway.)
-         */
-        if (Infos.front().Kind == IITDescriptor::Struct &&
-            Infos.front().Struct_NumElements == 0) {
-          Infos = Infos.slice(1);
-          return false;
-        }
-        return matchIntrinsicType(PT->getNonOpaquePointerElementType(), Infos,
-                                  ArgTys, DeferredChecks, IsDeferredCheck);
-      }
-      // Consume IIT descriptors relating to the pointer element type.
-      // FIXME: Intrinsic type matching of nested single value types or even
-      // aggregates doesn't work properly with opaque pointers but hopefully
-      // doesn't happen in practice.
-      while (Infos.front().Kind == IITDescriptor::Pointer ||
-             Infos.front().Kind == IITDescriptor::Vector)
-        Infos = Infos.slice(1);
-      assert((Infos.front().Kind != IITDescriptor::Argument ||
-              Infos.front().getArgumentKind() == IITDescriptor::AK_MatchType) &&
-             "Unsupported polymorphic pointer type with opaque pointer");
-      Infos = Infos.slice(1);
-      return false;
+      return !PT || PT->getAddressSpace() != D.Pointer_AddressSpace;
     }
     case IITDescriptor::Struct: {
Index: llvm/test/TableGen/intrinsic-pointer-to-any.td
===================================================================
--- llvm/test/TableGen/intrinsic-pointer-to-any.td
+++ /dev/null
@@ -1,12 +0,0 @@
-// RUN: llvm-tblgen -gen-intrinsic-impl -I %p/../../include %s -DTEST_INTRINSICS_SUPPRESS_DEFS | FileCheck %s
-
-// This test is validating that it an Intrinsic with an LLVMPointerType to
-// llvm_any_ty still properly work after r363233. That patch rewrote the
-// substitution handling code in the Intrinsic Emitter, and didn't consider this
-// case, so TableGen would hit an assertion in EncodeFixedType that was checking
-// to ensure that the substitution being processed was correctly replaced.
-
-include "llvm/IR/Intrinsics.td"
-
-def int_has_ptr_to_any : Intrinsic<[LLVMPointerType<llvm_any_ty>, llvm_i8_ty]>;
-// CHECK: /* 0 */ 21, 14, 15, 0, 2, 0
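
A closing observation on the deleted test: it exercised LLVMPointerType<llvm_any_ty>, a
pointer whose pointee was itself overloaded. With pointee types gone from the descriptor
tables there is nothing left for that case to cover. Under the new scheme the nearest
spelling would simply be an overloaded pointer, roughly (hypothetical sketch, not a
replacement test):

  // An overloaded pointer now carries only its address space as the overload.
  def int_has_any_ptr : Intrinsic<[llvm_anyptr_ty, llvm_i8_ty]>;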