Index: llvm/include/llvm/IR/IntrinsicsAMDGPU.td =================================================================== --- llvm/include/llvm/IR/IntrinsicsAMDGPU.td +++ llvm/include/llvm/IR/IntrinsicsAMDGPU.td @@ -11,10 +11,10 @@ //===----------------------------------------------------------------------===// class AMDGPUReadPreloadRegisterIntrinsic - : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>; + : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>; class AMDGPUReadPreloadRegisterIntrinsicNamed - : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>, GCCBuiltin; + : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>, GCCBuiltin; // Used to tag image and resource intrinsics with information used to generate // mem operands. @@ -48,35 +48,35 @@ defm int_r600_read_tidig : AMDGPUReadPreloadRegisterIntrinsic_xyz; def int_r600_group_barrier : GCCBuiltin<"__builtin_r600_group_barrier">, - Intrinsic<[], [], [IntrConvergent]>; + Intrinsic<[], [], [IntrConvergent, IntrWillReturn]>; // AS 7 is PARAM_I_ADDRESS, used for kernel arguments def int_r600_implicitarg_ptr : GCCBuiltin<"__builtin_r600_implicitarg_ptr">, Intrinsic<[LLVMQualPointerType], [], - [IntrNoMem, IntrSpeculatable]>; + [IntrNoMem, IntrSpeculatable, IntrWillReturn]>; def int_r600_rat_store_typed : // 1st parameter: Data // 2nd parameter: Index // 3rd parameter: Constant RAT ID - Intrinsic<[], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], []>, + Intrinsic<[], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], [IntrWillReturn]>, GCCBuiltin<"__builtin_r600_rat_store_typed">; def int_r600_recipsqrt_ieee : Intrinsic< - [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable] + [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_r600_recipsqrt_clamped : Intrinsic< - [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable] + [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, 
IntrSpeculatable, IntrWillReturn] >; def int_r600_cube : Intrinsic< - [llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable] + [llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_r600_store_stream_output : Intrinsic< - [], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [] + [], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrWillReturn] >; class TextureIntrinsicFloatInput : Intrinsic<[llvm_v4f32_ty], [ @@ -90,7 +90,7 @@ llvm_i32_ty, // coord_type_y llvm_i32_ty, // coord_type_z llvm_i32_ty], // coord_type_w - [IntrNoMem] + [IntrNoMem, IntrWillReturn] >; class TextureIntrinsicInt32Input : Intrinsic<[llvm_v4i32_ty], [ @@ -104,11 +104,11 @@ llvm_i32_ty, // coord_type_y llvm_i32_ty, // coord_type_z llvm_i32_ty], // coord_type_w - [IntrNoMem] + [IntrNoMem, IntrWillReturn] >; def int_r600_store_swizzle : - Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], [] + Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], [IntrWillReturn] >; def int_r600_tex : TextureIntrinsicFloatInput; @@ -123,10 +123,10 @@ def int_r600_ddy : TextureIntrinsicFloatInput; def int_r600_dot4 : Intrinsic<[llvm_float_ty], - [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable] + [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; -def int_r600_kill : Intrinsic<[], [llvm_float_ty], []>; +def int_r600_kill : Intrinsic<[], [llvm_float_ty], [IntrWillReturn]>; } // End TargetPrefix = "r600" @@ -142,42 +142,42 @@ def int_amdgcn_dispatch_ptr : Intrinsic<[LLVMQualPointerType], [], - [Align, IntrNoMem, IntrSpeculatable]>; + [Align, IntrNoMem, IntrSpeculatable, IntrWillReturn]>; def int_amdgcn_queue_ptr : GCCBuiltin<"__builtin_amdgcn_queue_ptr">, Intrinsic<[LLVMQualPointerType], [], - [Align, IntrNoMem, IntrSpeculatable]>; + [Align, IntrNoMem, IntrSpeculatable, IntrWillReturn]>; def int_amdgcn_kernarg_segment_ptr : GCCBuiltin<"__builtin_amdgcn_kernarg_segment_ptr">, 
Intrinsic<[LLVMQualPointerType], [], - [Align, IntrNoMem, IntrSpeculatable]>; + [Align, IntrNoMem, IntrSpeculatable, IntrWillReturn]>; def int_amdgcn_implicitarg_ptr : GCCBuiltin<"__builtin_amdgcn_implicitarg_ptr">, Intrinsic<[LLVMQualPointerType], [], - [Align, IntrNoMem, IntrSpeculatable]>; + [Align, IntrNoMem, IntrSpeculatable, IntrWillReturn]>; def int_amdgcn_groupstaticsize : GCCBuiltin<"__builtin_amdgcn_groupstaticsize">, - Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>; + Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>; def int_amdgcn_dispatch_id : GCCBuiltin<"__builtin_amdgcn_dispatch_id">, - Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable]>; + Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>; def int_amdgcn_implicit_buffer_ptr : GCCBuiltin<"__builtin_amdgcn_implicit_buffer_ptr">, Intrinsic<[LLVMQualPointerType], [], - [Align, IntrNoMem, IntrSpeculatable]>; + [Align, IntrNoMem, IntrSpeculatable, IntrWillReturn]>; // Set EXEC to the 64-bit value given. // This is always moved to the beginning of the basic block. // FIXME: Should be mangled for wave size. 
def int_amdgcn_init_exec : Intrinsic<[], [llvm_i64_ty], // 64-bit literal constant - [IntrConvergent, ImmArg>]>; + [IntrConvergent, ImmArg>, IntrWillReturn]>; // Set EXEC according to a thread count packed in an SGPR input: // thread_count = (input >> bitoffset) & 0x7f; @@ -185,11 +185,11 @@ def int_amdgcn_init_exec_from_input : Intrinsic<[], [llvm_i32_ty, // 32-bit SGPR input llvm_i32_ty], // bit offset of the thread count - [IntrConvergent, ImmArg>]>; + [IntrConvergent, ImmArg>, IntrWillReturn]>; def int_amdgcn_wavefrontsize : GCCBuiltin<"__builtin_amdgcn_wavefrontsize">, - Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>; + Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>; //===----------------------------------------------------------------------===// @@ -206,13 +206,13 @@ [ImmArg>, IntrNoMem, IntrHasSideEffects]>; def int_amdgcn_s_barrier : GCCBuiltin<"__builtin_amdgcn_s_barrier">, - Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrConvergent]>; + Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrConvergent, IntrWillReturn]>; def int_amdgcn_wave_barrier : GCCBuiltin<"__builtin_amdgcn_wave_barrier">, - Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrConvergent]>; + Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrConvergent, IntrWillReturn]>; def int_amdgcn_s_waitcnt : GCCBuiltin<"__builtin_amdgcn_s_waitcnt">, - Intrinsic<[], [llvm_i32_ty], [ImmArg>, IntrNoMem, IntrHasSideEffects]>; + Intrinsic<[], [llvm_i32_ty], [ImmArg>, IntrNoMem, IntrHasSideEffects, IntrWillReturn]>; def int_amdgcn_div_scale : Intrinsic< // 1st parameter: Numerator @@ -221,160 +221,160 @@ // (0 = Denominator, 1 = Numerator). 
[llvm_anyfloat_ty, llvm_i1_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty], - [IntrNoMem, IntrSpeculatable, ImmArg>] + [IntrNoMem, IntrSpeculatable, ImmArg>, IntrWillReturn] >; def int_amdgcn_div_fmas : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_div_fixup : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_trig_preop : Intrinsic< [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_sin : Intrinsic< [llvm_anyfloat_ty], [LLVMMatchType<0>], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_cos : Intrinsic< - [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable] + [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_log_clamp : Intrinsic< - [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable] + [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_fmul_legacy : GCCBuiltin<"__builtin_amdgcn_fmul_legacy">, Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_rcp : Intrinsic< - [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable] + [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_rcp_legacy : GCCBuiltin<"__builtin_amdgcn_rcp_legacy">, Intrinsic<[llvm_float_ty], [llvm_float_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_rsq : Intrinsic< - [llvm_anyfloat_ty], [LLVMMatchType<0>], 
[IntrNoMem, IntrSpeculatable] + [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_rsq_legacy : GCCBuiltin<"__builtin_amdgcn_rsq_legacy">, Intrinsic< - [llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable] + [llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; // out = 1.0 / sqrt(a) result clamped to +/- max_float. def int_amdgcn_rsq_clamp : Intrinsic< - [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]>; + [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn]>; def int_amdgcn_ldexp : Intrinsic< [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_frexp_mant : Intrinsic< - [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable] + [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_frexp_exp : Intrinsic< - [llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem, IntrSpeculatable] + [llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; // v_fract is buggy on SI/CI. It mishandles infinities, may return 1.0 // and always uses rtz, so is not suitable for implementing the OpenCL // fract function. It should be ok on VI. 
def int_amdgcn_fract : Intrinsic< - [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable] + [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_cvt_pkrtz : GCCBuiltin<"__builtin_amdgcn_cvt_pkrtz">, Intrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_cvt_pknorm_i16 : GCCBuiltin<"__builtin_amdgcn_cvt_pknorm_i16">, Intrinsic<[llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_cvt_pknorm_u16 : GCCBuiltin<"__builtin_amdgcn_cvt_pknorm_u16">, Intrinsic<[llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_cvt_pk_i16 : GCCBuiltin<"__builtin_amdgcn_cvt_pk_i16">, Intrinsic< [llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_cvt_pk_u16 : GCCBuiltin<"__builtin_amdgcn_cvt_pk_u16">, Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_class : Intrinsic< [llvm_i1_ty], [llvm_anyfloat_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_fmed3 : GCCBuiltin<"__builtin_amdgcn_fmed3">, Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_cubeid : GCCBuiltin<"__builtin_amdgcn_cubeid">, Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_cubema : GCCBuiltin<"__builtin_amdgcn_cubema">, Intrinsic<[llvm_float_ty], [llvm_float_ty, 
llvm_float_ty, llvm_float_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_cubesc : GCCBuiltin<"__builtin_amdgcn_cubesc">, Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_cubetc : GCCBuiltin<"__builtin_amdgcn_cubetc">, Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; // v_ffbh_i32, as opposed to v_ffbh_u32. For v_ffbh_u32, llvm.ctlz // should be used. def int_amdgcn_sffbh : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; // v_mad_f32|f16/v_mac_f32|f16, selected regardless of denorm support. def int_amdgcn_fmad_ftz : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; // Fields should mirror atomicrmw @@ -384,7 +384,8 @@ llvm_i32_ty, // ordering llvm_i32_ty, // scope llvm_i1_ty], // isVolatile - [IntrArgMemOnly, NoCapture>, ImmArg>, ImmArg>, ImmArg>], "", + [IntrArgMemOnly, IntrWillReturn, NoCapture>, + ImmArg>, ImmArg>, ImmArg>], "", [SDNPMemOperand] >; @@ -399,7 +400,8 @@ llvm_i32_ty, // ordering llvm_i32_ty, // scope llvm_i1_ty], // isVolatile - [IntrArgMemOnly, NoCapture>, ImmArg>, ImmArg>, ImmArg>] + [IntrArgMemOnly, IntrWillReturn, NoCapture>, + ImmArg>, ImmArg>, ImmArg>] >; // FIXME: The m0 argument should be moved after the normal arguments @@ -416,7 +418,7 @@ // gfx10: bits 24-27 indicate the number of active threads/dwords llvm_i1_ty, // wave release, usually set to 1 llvm_i1_ty], // wave done, set to 1 for the last ordered instruction - [NoCapture>, + [IntrWillReturn, NoCapture>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg> ] @@ -426,7 +428,8 @@ [llvm_i32_ty], 
[llvm_anyptr_ty, // LDS or GDS ptr llvm_i1_ty], // isVolatile - [IntrConvergent, IntrArgMemOnly, NoCapture>, ImmArg>], + [IntrConvergent, IntrWillReturn, IntrArgMemOnly, + NoCapture>, ImmArg>], "", [SDNPMemOperand] >; @@ -697,11 +700,15 @@ llvm_i1_ty], []), // unorm(imm) [llvm_i32_ty, // texfailctrl(imm; bit 0 = tfe, bit 1 = lwe) llvm_i32_ty]), // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc) + !listconcat(props, !if(P_.IsAtomic, [], [ImmArg.DmaskArgIndex>>]), !if(P_.IsSample, [ImmArg.UnormArgIndex>>], []), + [IntrWillReturn], [ImmArg.TexFailCtrlArgIndex>>, ImmArg.CachePolicyArgIndex>>]), + + "", sdnodeprops>, AMDGPURsrcIntrinsic { @@ -755,15 +762,15 @@ AMDGPUImageDMaskIntrinsic; defm int_amdgcn_image_load_mip : AMDGPUImageDimIntrinsicsNoMsaa<"LOAD_MIP", [llvm_any_ty], [], - [IntrReadMem], [SDNPMemOperand], 1>, + [IntrReadMem, IntrWillReturn], [SDNPMemOperand], 1>, AMDGPUImageDMaskIntrinsic; defm int_amdgcn_image_store : AMDGPUImageDimIntrinsicsAll< "STORE", [], [AMDGPUArg], - [IntrWriteMem], [SDNPMemOperand]>; + [IntrWriteMem, IntrWillReturn], [SDNPMemOperand]>; defm int_amdgcn_image_store_mip : AMDGPUImageDimIntrinsicsNoMsaa< "STORE_MIP", [], [AMDGPUArg], - [IntrWriteMem], [SDNPMemOperand], 1>; + [IntrWriteMem, IntrWillReturn], [SDNPMemOperand], 1>; ////////////////////////////////////////////////////////////////////////// // sample and getlod intrinsics @@ -861,7 +868,8 @@ llvm_i32_ty, // offset(SGPR/VGPR/imm) llvm_i1_ty, // glc(imm) llvm_i1_ty], // slc(imm) - [IntrReadMem, ImmArg>, ImmArg>], "", [SDNPMemOperand]>, + [IntrReadMem, IntrWillReturn, + ImmArg>, ImmArg>], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<0>; def int_amdgcn_buffer_load_format : AMDGPUBufferLoad; def int_amdgcn_buffer_load : AMDGPUBufferLoad; @@ -871,7 +879,7 @@ [llvm_v4i32_ty, // rsrc(SGPR) llvm_i32_ty, // byte offset(SGPR/imm) llvm_i32_ty], // cachepolicy(imm; bit 0 = glc, bit 2 = dlc) - [IntrNoMem, ImmArg>]>, + [IntrNoMem, IntrWillReturn, ImmArg>]>, AMDGPURsrcIntrinsic<0>; class 
AMDGPUBufferStore : Intrinsic < @@ -882,7 +890,8 @@ llvm_i32_ty, // offset(SGPR/VGPR/imm) llvm_i1_ty, // glc(imm) llvm_i1_ty], // slc(imm) - [IntrWriteMem, ImmArg>, ImmArg>], "", [SDNPMemOperand]>, + [IntrWriteMem, IntrWillReturn, + ImmArg>, ImmArg>], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<1>; def int_amdgcn_buffer_store_format : AMDGPUBufferStore; def int_amdgcn_buffer_store : AMDGPUBufferStore; @@ -903,7 +912,7 @@ // bit 1 = slc, // bit 2 = dlc on gfx10+), // swizzled buffer (bit 3 = swz)) - [IntrReadMem, ImmArg>], "", [SDNPMemOperand]>, + [IntrReadMem, IntrWillReturn, ImmArg>], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<0>; def int_amdgcn_raw_buffer_load_format : AMDGPURawBufferLoad; def int_amdgcn_raw_buffer_load : AMDGPURawBufferLoad; @@ -918,7 +927,7 @@ // bit 1 = slc, // bit 2 = dlc on gfx10+), // swizzled buffer (bit 3 = swz)) - [IntrReadMem, ImmArg>], "", [SDNPMemOperand]>, + [IntrReadMem, IntrWillReturn, ImmArg>], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<0>; def int_amdgcn_struct_buffer_load_format : AMDGPUStructBufferLoad; def int_amdgcn_struct_buffer_load : AMDGPUStructBufferLoad; @@ -933,7 +942,7 @@ // bit 1 = slc, // bit 2 = dlc on gfx10+), // swizzled buffer (bit 3 = swz)) - [IntrWriteMem, ImmArg>], "", [SDNPMemOperand]>, + [IntrWriteMem, IntrWillReturn, ImmArg>], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<1>; def int_amdgcn_raw_buffer_store_format : AMDGPURawBufferStore; def int_amdgcn_raw_buffer_store : AMDGPURawBufferStore; @@ -949,7 +958,7 @@ // bit 1 = slc, // bit 2 = dlc on gfx10+), // swizzled buffer (bit 3 = swz)) - [IntrWriteMem, ImmArg>], "", [SDNPMemOperand]>, + [IntrWriteMem, IntrWillReturn, ImmArg>], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<1>; def int_amdgcn_struct_buffer_store_format : AMDGPUStructBufferStore; def int_amdgcn_struct_buffer_store : AMDGPUStructBufferStore; @@ -961,7 +970,7 @@ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling) llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking 
and swizzling) llvm_i32_ty], // cachepolicy(imm; bit 1 = slc) - [ImmArg>], "", [SDNPMemOperand]>, + [ImmArg>, IntrWillReturn], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<1, 0>; def int_amdgcn_raw_buffer_atomic_swap : AMDGPURawBufferAtomic; def int_amdgcn_raw_buffer_atomic_add : AMDGPURawBufferAtomic; @@ -983,7 +992,7 @@ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling) llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling) llvm_i32_ty], // cachepolicy(imm; bit 1 = slc) - [ImmArg>], "", [SDNPMemOperand]>, + [ImmArg>, IntrWillReturn], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<2, 0>; class AMDGPUStructBufferAtomic : Intrinsic < @@ -994,7 +1003,7 @@ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling) llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling) llvm_i32_ty], // cachepolicy(imm; bit 1 = slc) - [ImmArg>], "", [SDNPMemOperand]>, + [ImmArg>, IntrWillReturn], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<1, 0>; def int_amdgcn_struct_buffer_atomic_swap : AMDGPUStructBufferAtomic; def int_amdgcn_struct_buffer_atomic_add : AMDGPUStructBufferAtomic; @@ -1017,7 +1026,7 @@ llvm_i32_ty, // offset(VGPR/imm, included in bounds checking and swizzling) llvm_i32_ty, // soffset(SGPR/imm, excluded from bounds checking and swizzling) llvm_i32_ty], // cachepolicy(imm; bit 1 = slc) - [ImmArg>], "", [SDNPMemOperand]>, + [ImmArg>, IntrWillReturn], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<2, 0>; // Obsolescent tbuffer intrinsics. 
@@ -1032,7 +1041,8 @@ llvm_i32_ty, // nfmt(imm) llvm_i1_ty, // glc(imm) llvm_i1_ty], // slc(imm) - [IntrReadMem, ImmArg>, ImmArg>, ImmArg>, + [IntrReadMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>, ImmArg>, ImmArg>], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<0>; @@ -1048,7 +1058,8 @@ llvm_i32_ty, // nfmt(imm) llvm_i1_ty, // glc(imm) llvm_i1_ty], // slc(imm) - [IntrWriteMem, ImmArg>, ImmArg>, ImmArg>, + [IntrWriteMem, IntrWillReturn, ImmArg>, + ImmArg>, ImmArg>, ImmArg>, ImmArg>], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<1>; @@ -1066,7 +1077,8 @@ // bit 1 = slc, // bit 2 = dlc on gfx10+), // swizzled buffer (bit 3 = swz)) - [IntrReadMem, ImmArg>, ImmArg>], "", [SDNPMemOperand]>, + [IntrReadMem, IntrWillReturn, + ImmArg>, ImmArg>], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<0>; def int_amdgcn_raw_tbuffer_store : Intrinsic < @@ -1080,7 +1092,8 @@ // bit 1 = slc, // bit 2 = dlc on gfx10+), // swizzled buffer (bit 3 = swz)) - [IntrWriteMem, ImmArg>, ImmArg>], "", [SDNPMemOperand]>, + [IntrWriteMem, IntrWillReturn, + ImmArg>, ImmArg>], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<1>; def int_amdgcn_struct_tbuffer_load : Intrinsic < @@ -1094,7 +1107,8 @@ // bit 1 = slc, // bit 2 = dlc on gfx10+), // swizzled buffer (bit 3 = swz)) - [IntrReadMem, ImmArg>, ImmArg>], "", [SDNPMemOperand]>, + [IntrReadMem, IntrWillReturn, + ImmArg>, ImmArg>], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<0>; def int_amdgcn_struct_tbuffer_store : Intrinsic < @@ -1109,7 +1123,8 @@ // bit 1 = slc, // bit 2 = dlc on gfx10+), // swizzled buffer (bit 3 = swz)) - [IntrWriteMem, ImmArg>, ImmArg>], "", [SDNPMemOperand]>, + [IntrWriteMem, IntrWillReturn, + ImmArg>, ImmArg>], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<1>; class AMDGPUBufferAtomic : Intrinsic < @@ -1119,7 +1134,7 @@ llvm_i32_ty, // vindex(VGPR) llvm_i32_ty, // offset(SGPR/VGPR/imm) llvm_i1_ty], // slc(imm) - [ImmArg>], "", [SDNPMemOperand]>, + [ImmArg>, IntrWillReturn], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<1, 0>; def 
int_amdgcn_buffer_atomic_swap : AMDGPUBufferAtomic; def int_amdgcn_buffer_atomic_add : AMDGPUBufferAtomic; @@ -1139,7 +1154,7 @@ llvm_i32_ty, // vindex(VGPR) llvm_i32_ty, // offset(SGPR/VGPR/imm) llvm_i1_ty], // slc(imm) - [ImmArg>], "", [SDNPMemOperand]>, + [ImmArg>, IntrWillReturn], "", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<2, 0>; } // defset AMDGPUBufferIntrinsics @@ -1156,7 +1171,9 @@ llvm_i1_ty, // done llvm_i1_ty // vm ], - [ImmArg>, ImmArg>, ImmArg>, ImmArg>, IntrWriteMem, IntrInaccessibleMemOnly] + [ImmArg>, ImmArg>, ImmArg>, + ImmArg>, IntrWriteMem, IntrInaccessibleMemOnly, + IntrWillReturn] >; // exp with compr bit set. @@ -1167,44 +1184,50 @@ LLVMMatchType<0>, // src1 llvm_i1_ty, // done llvm_i1_ty], // vm - [ImmArg>, ImmArg>, ImmArg>, ImmArg>, IntrWriteMem, IntrInaccessibleMemOnly] + [ImmArg>, ImmArg>, ImmArg>, + ImmArg>, IntrWriteMem, IntrInaccessibleMemOnly, + IntrWillReturn] >; def int_amdgcn_buffer_wbinvl1_sc : GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_sc">, - Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>; + Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>; def int_amdgcn_buffer_wbinvl1 : GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1">, - Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>; + Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>; def int_amdgcn_s_dcache_inv : GCCBuiltin<"__builtin_amdgcn_s_dcache_inv">, - Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>; + Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>; def int_amdgcn_s_memtime : GCCBuiltin<"__builtin_amdgcn_s_memtime">, - Intrinsic<[llvm_i64_ty], []>; + Intrinsic<[llvm_i64_ty], [], [IntrWillReturn]>; def int_amdgcn_s_sleep : GCCBuiltin<"__builtin_amdgcn_s_sleep">, - Intrinsic<[], [llvm_i32_ty], [ImmArg>, IntrNoMem, IntrHasSideEffects]> { + Intrinsic<[], [llvm_i32_ty], [ImmArg>, IntrNoMem, + IntrHasSideEffects, IntrWillReturn]> { } def int_amdgcn_s_incperflevel : GCCBuiltin<"__builtin_amdgcn_s_incperflevel">, - Intrinsic<[], 
[llvm_i32_ty], [ImmArg>, IntrNoMem, IntrHasSideEffects]> { + Intrinsic<[], [llvm_i32_ty], [ImmArg>, IntrNoMem, + IntrHasSideEffects, IntrWillReturn]> { } def int_amdgcn_s_decperflevel : GCCBuiltin<"__builtin_amdgcn_s_decperflevel">, - Intrinsic<[], [llvm_i32_ty], [ImmArg>, IntrNoMem, IntrHasSideEffects]> { + Intrinsic<[], [llvm_i32_ty], [ImmArg>, IntrNoMem, + IntrHasSideEffects, IntrWillReturn]> { } def int_amdgcn_s_getreg : GCCBuiltin<"__builtin_amdgcn_s_getreg">, Intrinsic<[llvm_i32_ty], [llvm_i32_ty], - [IntrInaccessibleMemOnly, IntrReadMem, IntrSpeculatable, ImmArg>] + [IntrInaccessibleMemOnly, IntrReadMem, IntrSpeculatable, + IntrWillReturn, ImmArg>] >; // int_amdgcn_s_getpc is provided to allow a specific style of position @@ -1215,7 +1238,8 @@ // especially as we explicitly use IntrNoMem to allow optimizations. def int_amdgcn_s_getpc : GCCBuiltin<"__builtin_amdgcn_s_getpc">, - Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable]>; + Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable, + IntrWillReturn]>; // __builtin_amdgcn_interp_mov , , , // param values: 0 = P10, 1 = P20, 2 = P0 @@ -1223,7 +1247,8 @@ GCCBuiltin<"__builtin_amdgcn_interp_mov">, Intrinsic<[llvm_float_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable, ImmArg>, ImmArg>, ImmArg>]>; + [IntrNoMem, IntrSpeculatable, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; // __builtin_amdgcn_interp_p1 , , , // This intrinsic reads from lds, but the memory values are constant, @@ -1232,14 +1257,16 @@ GCCBuiltin<"__builtin_amdgcn_interp_p1">, Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable, ImmArg>, ImmArg>]>; + [IntrNoMem, IntrSpeculatable, IntrWillReturn, + ImmArg>, ImmArg>]>; // __builtin_amdgcn_interp_p2 , , , , def int_amdgcn_interp_p2 : GCCBuiltin<"__builtin_amdgcn_interp_p2">, Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, 
IntrSpeculatable, ImmArg>, ImmArg>]>; + [IntrNoMem, IntrSpeculatable, IntrWillReturn, + ImmArg>, ImmArg>]>; // See int_amdgcn_v_interp_p1 for why this is IntrNoMem. // __builtin_amdgcn_interp_p1_f16 , , , , @@ -1247,120 +1274,130 @@ GCCBuiltin<"__builtin_amdgcn_interp_p1_f16">, Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable, ImmArg>, ImmArg>, ImmArg>]>; + [IntrNoMem, IntrSpeculatable, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; // __builtin_amdgcn_interp_p2_f16 , , , , , def int_amdgcn_interp_p2_f16 : GCCBuiltin<"__builtin_amdgcn_interp_p2_f16">, Intrinsic<[llvm_half_ty], [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable, ImmArg>, ImmArg>, ImmArg>]>; + [IntrNoMem, IntrSpeculatable, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; // Pixel shaders only: whether the current pixel is live (i.e. not a helper // invocation for derivative computation). 
def int_amdgcn_ps_live : Intrinsic < [llvm_i1_ty], [], - [IntrNoMem]>; + [IntrNoMem, IntrWillReturn]>; def int_amdgcn_mbcnt_lo : GCCBuiltin<"__builtin_amdgcn_mbcnt_lo">, - Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], + [IntrNoMem, IntrWillReturn]>; def int_amdgcn_mbcnt_hi : GCCBuiltin<"__builtin_amdgcn_mbcnt_hi">, - Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], + [IntrNoMem, IntrWillReturn]>; // llvm.amdgcn.ds.swizzle src offset def int_amdgcn_ds_swizzle : GCCBuiltin<"__builtin_amdgcn_ds_swizzle">, Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrConvergent, ImmArg>]>; + [IntrNoMem, IntrConvergent, IntrWillReturn, + ImmArg>]>; def int_amdgcn_ubfe : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_sbfe : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_lerp : GCCBuiltin<"__builtin_amdgcn_lerp">, Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_sad_u8 : GCCBuiltin<"__builtin_amdgcn_sad_u8">, Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_msad_u8 : GCCBuiltin<"__builtin_amdgcn_msad_u8">, Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_sad_hi_u8 : GCCBuiltin<"__builtin_amdgcn_sad_hi_u8">, Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, 
IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_sad_u16 : GCCBuiltin<"__builtin_amdgcn_sad_u16">, Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_qsad_pk_u16_u8 : GCCBuiltin<"__builtin_amdgcn_qsad_pk_u16_u8">, Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_mqsad_pk_u16_u8 : GCCBuiltin<"__builtin_amdgcn_mqsad_pk_u16_u8">, Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_mqsad_u32_u8 : GCCBuiltin<"__builtin_amdgcn_mqsad_u32_u8">, Intrinsic<[llvm_v4i32_ty], [llvm_i64_ty, llvm_i32_ty, llvm_v4i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_cvt_pk_u8_f32 : GCCBuiltin<"__builtin_amdgcn_cvt_pk_u8_f32">, Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_icmp : Intrinsic<[llvm_anyint_ty], [llvm_anyint_ty, LLVMMatchType<1>, llvm_i32_ty], - [IntrNoMem, IntrConvergent, ImmArg>]>; + [IntrNoMem, IntrConvergent, IntrWillReturn, + ImmArg>]>; def int_amdgcn_fcmp : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>, llvm_i32_ty], - [IntrNoMem, IntrConvergent, ImmArg>]>; + [IntrNoMem, IntrConvergent, IntrWillReturn, + ImmArg>]>; def int_amdgcn_ballot : - Intrinsic<[llvm_anyint_ty], [llvm_i1_ty], [IntrNoMem, IntrConvergent]>; + Intrinsic<[llvm_anyint_ty], [llvm_i1_ty], + [IntrNoMem, IntrConvergent, IntrWillReturn]>; def int_amdgcn_readfirstlane : GCCBuiltin<"__builtin_amdgcn_readfirstlane">, - Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrConvergent]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty], + [IntrNoMem, IntrConvergent, IntrWillReturn]>; // 
The lane argument must be uniform across the currently active threads of the // current wave. Otherwise, the result is undefined. def int_amdgcn_readlane : GCCBuiltin<"__builtin_amdgcn_readlane">, - Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], + [IntrNoMem, IntrConvergent, IntrWillReturn]>; // The value to write and lane select arguments must be uniform across the // currently active threads of the current wave. Otherwise, the result is @@ -1372,28 +1409,28 @@ llvm_i32_ty, // uniform lane select llvm_i32_ty // returned by all lanes other than the selected one ], - [IntrNoMem, IntrConvergent] + [IntrNoMem, IntrConvergent, IntrWillReturn] >; // FIXME: Deprecated. This is equivalent to llvm.fshr def int_amdgcn_alignbit : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_alignbyte : GCCBuiltin<"__builtin_amdgcn_alignbyte">, Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_mul_i24 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; def int_amdgcn_mul_u24 : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; // llvm.amdgcn.ds.gws.init(i32 bar_val, i32 resource_id) @@ -1404,7 +1441,8 @@ GCCBuiltin<"__builtin_amdgcn_ds_gws_init">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrWriteMem, IntrInaccessibleMemOnly], "", + [IntrConvergent, IntrWriteMem, + IntrInaccessibleMemOnly, IntrWillReturn], "", [SDNPMemOperand] >; @@ -1415,7 +1453,7 @@ GCCBuiltin<"__builtin_amdgcn_ds_gws_barrier">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrInaccessibleMemOnly], "", 
+ [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn], "", [SDNPMemOperand] >; @@ -1424,7 +1462,7 @@ GCCBuiltin<"__builtin_amdgcn_ds_gws_sema_v">, Intrinsic<[], [llvm_i32_ty], - [IntrConvergent, IntrInaccessibleMemOnly], "", + [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn], "", [SDNPMemOperand] >; @@ -1433,7 +1471,7 @@ GCCBuiltin<"__builtin_amdgcn_ds_gws_sema_br">, Intrinsic<[], [llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrInaccessibleMemOnly], "", + [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn], "", [SDNPMemOperand] >; @@ -1442,7 +1480,7 @@ GCCBuiltin<"__builtin_amdgcn_ds_gws_sema_p">, Intrinsic<[], [llvm_i32_ty], - [IntrConvergent, IntrInaccessibleMemOnly], "", + [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn], "", [SDNPMemOperand] >; @@ -1451,7 +1489,7 @@ GCCBuiltin<"__builtin_amdgcn_ds_gws_sema_release_all">, Intrinsic<[], [llvm_i32_ty], - [IntrConvergent, IntrInaccessibleMemOnly], "", + [IntrConvergent, IntrInaccessibleMemOnly, IntrWillReturn], "", [SDNPMemOperand] >; @@ -1459,24 +1497,24 @@ // Copies the source value to the destination value, with the guarantee that // the source value is computed as if the entire program were executed in WQM. def int_amdgcn_wqm : Intrinsic<[llvm_any_ty], - [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable] + [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; // Copies the source value to the destination value, such that the source // is computed as if the entire program were executed in WQM if any other // program code executes in WQM. def int_amdgcn_softwqm : Intrinsic<[llvm_any_ty], - [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable] + [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; // Return true if at least one thread within the pixel quad passes true into // the function. 
def int_amdgcn_wqm_vote : Intrinsic<[llvm_i1_ty], - [llvm_i1_ty], [IntrNoMem, IntrConvergent] + [llvm_i1_ty], [IntrNoMem, IntrConvergent, IntrWillReturn] >; // If false, set EXEC=0 for the current thread until the end of program. -// FIXME: Should this be IntrNoMem, IntrHasSideEffects? +// FIXME: Should this be IntrNoMem, IntrHasSideEffects, or IntrWillReturn? def int_amdgcn_kill : Intrinsic<[], [llvm_i1_ty], []>; // Copies the active channels of the source value to the destination value, @@ -1485,7 +1523,8 @@ // enabled, with a few exceptions: - Phi nodes which require WWM return an // undefined value. def int_amdgcn_wwm : Intrinsic<[llvm_any_ty], - [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrConvergent] + [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, + IntrConvergent, IntrWillReturn] >; // Given a value, copies it while setting all the inactive lanes to a given @@ -1496,18 +1535,18 @@ Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, // value to be copied LLVMMatchType<0>], // value for the inactive lanes to take - [IntrNoMem, IntrConvergent]>; + [IntrNoMem, IntrConvergent, IntrWillReturn]>; // Return if the given flat pointer points to a local memory address. def int_amdgcn_is_shared : GCCBuiltin<"__builtin_amdgcn_is_shared">, Intrinsic<[llvm_i1_ty], [llvm_ptr_ty], - [IntrNoMem, IntrSpeculatable, NoCapture>] + [IntrNoMem, IntrSpeculatable, NoCapture>, IntrWillReturn] >; // Return if the given flat pointer points to a private memory address. 
def int_amdgcn_is_private : GCCBuiltin<"__builtin_amdgcn_is_private">, Intrinsic<[llvm_i1_ty], [llvm_ptr_ty], - [IntrNoMem, IntrSpeculatable, NoCapture>] + [IntrNoMem, IntrSpeculatable, NoCapture>, IntrWillReturn] >; //===----------------------------------------------------------------------===// @@ -1516,11 +1555,11 @@ def int_amdgcn_s_dcache_inv_vol : GCCBuiltin<"__builtin_amdgcn_s_dcache_inv_vol">, - Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>; + Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>; def int_amdgcn_buffer_wbinvl1_vol : GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_vol">, - Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>; + Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>; //===----------------------------------------------------------------------===// // VI Intrinsics @@ -1530,8 +1569,10 @@ def int_amdgcn_mov_dpp : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, - llvm_i1_ty], [IntrNoMem, IntrConvergent, ImmArg>, - ImmArg>, ImmArg>, ImmArg>]>; + llvm_i1_ty], + [IntrNoMem, IntrConvergent, IntrWillReturn, + ImmArg>, ImmArg>, + ImmArg>, ImmArg>]>; // llvm.amdgcn.update.dpp.i32 // Should be equivalent to: @@ -1541,30 +1582,33 @@ Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty], - [IntrNoMem, IntrConvergent, - ImmArg>, ImmArg>, ImmArg>, ImmArg>]>; + [IntrNoMem, IntrConvergent, IntrWillReturn, + ImmArg>, ImmArg>, + ImmArg>, ImmArg>]>; def int_amdgcn_s_dcache_wb : GCCBuiltin<"__builtin_amdgcn_s_dcache_wb">, - Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>; + Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>; def int_amdgcn_s_dcache_wb_vol : GCCBuiltin<"__builtin_amdgcn_s_dcache_wb_vol">, - Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects]>; + Intrinsic<[], [], [IntrNoMem, IntrHasSideEffects, IntrWillReturn]>; def int_amdgcn_s_memrealtime : GCCBuiltin<"__builtin_amdgcn_s_memrealtime">, - 
Intrinsic<[llvm_i64_ty]>; + Intrinsic<[llvm_i64_ty], [], [IntrWillReturn]>; // llvm.amdgcn.ds.permute def int_amdgcn_ds_permute : GCCBuiltin<"__builtin_amdgcn_ds_permute">, - Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], + [IntrNoMem, IntrConvergent, IntrWillReturn]>; // llvm.amdgcn.ds.bpermute def int_amdgcn_ds_bpermute : GCCBuiltin<"__builtin_amdgcn_ds_bpermute">, - Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>; + Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], + [IntrNoMem, IntrConvergent, IntrWillReturn]>; //===----------------------------------------------------------------------===// // GFX10 Intrinsics @@ -1574,13 +1618,15 @@ def int_amdgcn_permlane16 : GCCBuiltin<"__builtin_amdgcn_permlane16">, Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i1_ty], - [IntrNoMem, IntrConvergent, ImmArg>, ImmArg>]>; + [IntrNoMem, IntrConvergent, IntrWillReturn, + ImmArg>, ImmArg>]>; // llvm.amdgcn.permlanex16 def int_amdgcn_permlanex16 : GCCBuiltin<"__builtin_amdgcn_permlanex16">, Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i1_ty], - [IntrNoMem, IntrConvergent, ImmArg>, ImmArg>]>; + [IntrNoMem, IntrConvergent, IntrWillReturn, + ImmArg>, ImmArg>]>; // llvm.amdgcn.mov.dpp8.i32 // is a 32-bit constant whose high 8 bits must be zero which selects @@ -1588,11 +1634,13 @@ def int_amdgcn_mov_dpp8 : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i32_ty], - [IntrNoMem, IntrConvergent, ImmArg>]>; + [IntrNoMem, IntrConvergent, IntrWillReturn, + ImmArg>]>; def int_amdgcn_s_get_waveid_in_workgroup : GCCBuiltin<"__builtin_amdgcn_s_get_waveid_in_workgroup">, - Intrinsic<[llvm_i32_ty], [], [IntrReadMem, IntrInaccessibleMemOnly]>; + Intrinsic<[llvm_i32_ty], [], + [IntrReadMem, IntrInaccessibleMemOnly, IntrWillReturn]>; 
//===----------------------------------------------------------------------===// // Deep learning intrinsics. @@ -1610,7 +1658,7 @@ llvm_float_ty, // %c llvm_i1_ty // %clamp ], - [IntrNoMem, IntrSpeculatable, ImmArg>] + [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg>] >; // i32 %r = llvm.amdgcn.sdot2(v2i16 %a, v2i16 %b, i32 %c, i1 %clamp) @@ -1625,7 +1673,7 @@ llvm_i32_ty, // %c llvm_i1_ty // %clamp ], - [IntrNoMem, IntrSpeculatable, ImmArg>] + [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg>] >; // u32 %r = llvm.amdgcn.udot2(v2u16 %a, v2u16 %b, u32 %c, i1 %clamp) @@ -1640,7 +1688,7 @@ llvm_i32_ty, // %c llvm_i1_ty // %clamp ], - [IntrNoMem, IntrSpeculatable, ImmArg>] + [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg>] >; // i32 %r = llvm.amdgcn.sdot4(v4i8 (as i32) %a, v4i8 (as i32) %b, i32 %c, i1 %clamp) @@ -1655,7 +1703,7 @@ llvm_i32_ty, // %c llvm_i1_ty // %clamp ], - [IntrNoMem, IntrSpeculatable, ImmArg>] + [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg>] >; // u32 %r = llvm.amdgcn.udot4(v4u8 (as u32) %a, v4u8 (as u32) %b, u32 %c, i1 %clamp) @@ -1670,7 +1718,7 @@ llvm_i32_ty, // %c llvm_i1_ty // %clamp ], - [IntrNoMem, IntrSpeculatable, ImmArg>] + [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg>] >; // i32 %r = llvm.amdgcn.sdot8(v8i4 (as i32) %a, v8i4 (as i32) %b, i32 %c, i1 %clamp) @@ -1686,7 +1734,7 @@ llvm_i32_ty, // %c llvm_i1_ty // %clamp ], - [IntrNoMem, IntrSpeculatable, ImmArg>] + [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg>] >; // u32 %r = llvm.amdgcn.udot8(v8u4 (as u32) %a, v8u4 (as u32) %b, u32 %c, i1 %clamp) @@ -1702,7 +1750,7 @@ llvm_i32_ty, // %c llvm_i1_ty // %clamp ], - [IntrNoMem, IntrSpeculatable, ImmArg>] + [IntrNoMem, IntrSpeculatable, IntrWillReturn, ImmArg>] >; //===----------------------------------------------------------------------===// @@ -1716,14 +1764,15 @@ llvm_i32_ty, // vindex(VGPR) llvm_i32_ty, // offset(SGPR/VGPR/imm) llvm_i1_ty], // slc(imm) - [], "", [SDNPMemOperand]>, + [IntrWillReturn], 
"", [SDNPMemOperand]>, AMDGPURsrcIntrinsic<1, 0>; class AMDGPUGlobalAtomicNoRtn : Intrinsic < [], [llvm_anyptr_ty, // vaddr llvm_anyfloat_ty], // vdata(VGPR) - [IntrArgMemOnly, NoCapture>], "", [SDNPMemOperand]>; + [IntrArgMemOnly, IntrWillReturn, NoCapture>], "", + [SDNPMemOperand]>; def int_amdgcn_buffer_atomic_fadd : AMDGPUBufferAtomicNoRtn; def int_amdgcn_global_atomic_fadd : AMDGPUGlobalAtomicNoRtn; @@ -1733,143 +1782,165 @@ Intrinsic<[llvm_v32f32_ty], [llvm_float_ty, llvm_float_ty, llvm_v32f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_f32_16x16x1f32 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_16x16x1f32">, Intrinsic<[llvm_v16f32_ty], [llvm_float_ty, llvm_float_ty, llvm_v16f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_f32_4x4x1f32 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_4x4x1f32">, Intrinsic<[llvm_v4f32_ty], [llvm_float_ty, llvm_float_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_f32_32x32x2f32 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_32x32x2f32">, Intrinsic<[llvm_v16f32_ty], [llvm_float_ty, llvm_float_ty, llvm_v16f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_f32_16x16x4f32 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_16x16x4f32">, Intrinsic<[llvm_v4f32_ty], [llvm_float_ty, llvm_float_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, 
IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_f32_32x32x4f16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_32x32x4f16">, Intrinsic<[llvm_v32f32_ty], [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v32f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_f32_16x16x4f16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_16x16x4f16">, Intrinsic<[llvm_v16f32_ty], [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v16f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_f32_4x4x4f16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_4x4x4f16">, Intrinsic<[llvm_v4f32_ty], [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_f32_32x32x8f16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_32x32x8f16">, Intrinsic<[llvm_v16f32_ty], [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v16f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_f32_16x16x16f16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_16x16x16f16">, Intrinsic<[llvm_v4f32_ty], [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_i32_32x32x4i8 : GCCBuiltin<"__builtin_amdgcn_mfma_i32_32x32x4i8">, Intrinsic<[llvm_v32i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_v32i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + 
[IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_i32_16x16x4i8 : GCCBuiltin<"__builtin_amdgcn_mfma_i32_16x16x4i8">, Intrinsic<[llvm_v16i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_v16i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_i32_4x4x4i8 : GCCBuiltin<"__builtin_amdgcn_mfma_i32_4x4x4i8">, Intrinsic<[llvm_v4i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_i32_32x32x8i8 : GCCBuiltin<"__builtin_amdgcn_mfma_i32_32x32x8i8">, Intrinsic<[llvm_v16i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_v16i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_i32_16x16x16i8 : GCCBuiltin<"__builtin_amdgcn_mfma_i32_16x16x16i8">, Intrinsic<[llvm_v4i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_f32_32x32x2bf16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_32x32x2bf16">, Intrinsic<[llvm_v32f32_ty], [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v32f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_f32_16x16x2bf16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_16x16x2bf16">, Intrinsic<[llvm_v16f32_ty], [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v16f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, 
ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_f32_4x4x2bf16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_4x4x2bf16">, Intrinsic<[llvm_v4f32_ty], [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_f32_32x32x4bf16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_32x32x4bf16">, Intrinsic<[llvm_v16f32_ty], [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v16f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; def int_amdgcn_mfma_f32_16x16x8bf16 : GCCBuiltin<"__builtin_amdgcn_mfma_f32_16x16x8bf16">, Intrinsic<[llvm_v4f32_ty], [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], - [IntrConvergent, IntrNoMem, ImmArg>, ImmArg>, ImmArg>]>; + [IntrConvergent, IntrNoMem, IntrWillReturn, + ImmArg>, ImmArg>, ImmArg>]>; //===----------------------------------------------------------------------===// // Special Intrinsics for backend internal use only. No frontend // should emit calls to these. 
// ===----------------------------------------------------------------------===// def int_amdgcn_if : Intrinsic<[llvm_i1_ty, llvm_anyint_ty], - [llvm_i1_ty], [IntrConvergent] + [llvm_i1_ty], [IntrConvergent, IntrWillReturn] >; def int_amdgcn_else : Intrinsic<[llvm_i1_ty, llvm_anyint_ty], - [llvm_anyint_ty], [IntrConvergent] + [llvm_anyint_ty], [IntrConvergent, IntrWillReturn] >; def int_amdgcn_if_break : Intrinsic<[llvm_anyint_ty], - [llvm_i1_ty, LLVMMatchType<0>], [IntrNoMem, IntrConvergent] + [llvm_i1_ty, LLVMMatchType<0>], + [IntrNoMem, IntrConvergent, IntrWillReturn] >; def int_amdgcn_loop : Intrinsic<[llvm_i1_ty], - [llvm_anyint_ty], [IntrConvergent] + [llvm_anyint_ty], [IntrConvergent, IntrWillReturn] >; -def int_amdgcn_end_cf : Intrinsic<[], [llvm_anyint_ty], [IntrConvergent]>; +def int_amdgcn_end_cf : Intrinsic<[], [llvm_anyint_ty], + [IntrConvergent, IntrWillReturn]>; // Represent unreachable in a divergent region. def int_amdgcn_unreachable : Intrinsic<[], [], [IntrConvergent]>; @@ -1878,12 +1949,12 @@ // pass based on !fpmath metadata. def int_amdgcn_fdiv_fast : Intrinsic< [llvm_float_ty], [llvm_float_ty, llvm_float_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; // Represent a relocation constant. 
def int_amdgcn_reloc_constant : Intrinsic< [llvm_i32_ty], [llvm_metadata_ty], - [IntrNoMem, IntrSpeculatable] + [IntrNoMem, IntrSpeculatable, IntrWillReturn] >; } Index: llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll +++ llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll @@ -314,7 +314,7 @@ attributes #2 = { nounwind "target-cpu"="gfx900" } attributes #3 = { nounwind } -; HSA: attributes #0 = { nounwind readnone speculatable } +; HSA: attributes #0 = { nounwind readnone speculatable willreturn } ; HSA: attributes #1 = { nounwind "amdgpu-work-item-id-x" "target-cpu"="fiji" "uniform-work-group-size"="false" } ; HSA: attributes #2 = { nounwind "amdgpu-work-item-id-y" "target-cpu"="fiji" "uniform-work-group-size"="false" } ; HSA: attributes #3 = { nounwind "amdgpu-work-item-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" } Index: llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll +++ llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll @@ -280,7 +280,7 @@ attributes #0 = { nounwind readnone speculatable } attributes #1 = { nounwind } -; HSA: attributes #0 = { nounwind readnone speculatable } +; HSA: attributes #0 = { nounwind readnone speculatable willreturn } ; HSA: attributes #1 = { nounwind } ; HSA: attributes #2 = { nounwind "amdgpu-work-group-id-y" } ; HSA: attributes #3 = { nounwind "amdgpu-work-group-id-z" }