Index: llvm/lib/Target/AMDGPU/VOP2Instructions.td
===================================================================
--- llvm/lib/Target/AMDGPU/VOP2Instructions.td
+++ llvm/lib/Target/AMDGPU/VOP2Instructions.td
@@ -467,7 +467,7 @@
 def V_MADMK_F32 : VOP2_Pseudo <"v_madmk_f32", VOP_MADMK_F32, []>;
 
 let isCommutable = 1 in {
-defm V_ADD_F32 : VOP2Inst <"v_add_f32", VOP_F32_F32_F32, fadd>;
+defm V_ADD_F32 : VOP2Inst <"v_add_f32", VOP_F32_F32_F32, any_fadd>;
 defm V_SUB_F32 : VOP2Inst <"v_sub_f32", VOP_F32_F32_F32, fsub>;
 defm V_SUBREV_F32 : VOP2Inst <"v_subrev_f32", VOP_F32_F32_F32, null_frag, "v_sub_f32">;
 defm V_MUL_LEGACY_F32 : VOP2Inst <"v_mul_legacy_f32", VOP_F32_F32_F32, AMDGPUfmul_legacy>;
@@ -629,7 +629,7 @@
 
 let isCommutable = 1 in {
 let FPDPRounding = 1 in {
-defm V_ADD_F16 : VOP2Inst <"v_add_f16", VOP_F16_F16_F16, fadd>;
+defm V_ADD_F16 : VOP2Inst <"v_add_f16", VOP_F16_F16_F16, any_fadd>;
 defm V_SUB_F16 : VOP2Inst <"v_sub_f16", VOP_F16_F16_F16, fsub>;
 defm V_SUBREV_F16 : VOP2Inst <"v_subrev_f16", VOP_F16_F16_F16, null_frag, "v_sub_f16">;
 defm V_MUL_F16 : VOP2Inst <"v_mul_f16", VOP_F16_F16_F16, fmul>;
Index: llvm/lib/Target/AMDGPU/VOP3Instructions.td
===================================================================
--- llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -303,7 +303,7 @@
 let SchedRW = [WriteDoubleAdd] in {
 let FPDPRounding = 1 in {
 def V_FMA_F64 : VOP3Inst <"v_fma_f64", VOP3_Profile<VOP_F64_F64_F64_F64>, fma>;
-def V_ADD_F64 : VOP3Inst <"v_add_f64", VOP3_Profile<VOP_F64_F64_F64>, fadd, 1>;
+def V_ADD_F64 : VOP3Inst <"v_add_f64", VOP3_Profile<VOP_F64_F64_F64>, any_fadd, 1>;
 def V_MUL_F64 : VOP3Inst <"v_mul_f64", VOP3_Profile<VOP_F64_F64_F64>, fmul, 1>;
 } // End FPDPRounding = 1
 def V_MIN_F64 : VOP3Inst <"v_min_f64", VOP3_Profile<VOP_F64_F64_F64>, fminnum_like, 1>;
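Note: any_fadd is the PatFrags alias that matches both the default fadd SelectionDAG node and its exception-aware counterpart strict_fadd, which llvm.experimental.constrained.fadd lowers to. Switching these patterns to any_fadd is what lets the new strict_fadd.* tests below select the ordinary v_add instructions. A minimal sketch of the alias, paraphrased from llvm/include/llvm/Target/TargetSelectionDAG.td (the in-tree definition is authoritative):

  // Matches either the plain or the strict (constrained) fadd node, so one
  // instruction pattern covers both forms.
  def any_fadd : PatFrags<(ops node:$lhs, node:$rhs),
                          [(strict_fadd node:$lhs, node:$rhs),
                           (fadd node:$lhs, node:$rhs)]>;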
Index: llvm/test/CodeGen/AMDGPU/strict_fadd.f16.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/strict_fadd.f16.ll
@@ -0,0 +1,147 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefixes=GCN,GFX9 %s
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji < %s | FileCheck -check-prefixes=GCN,GFX8 %s
+; FIXME: promotion not handled without f16 insts
+
+define half @v_constained_fadd_f16_fpexcept_strict(half %x, half %y) #0 {
+; GCN-LABEL: v_constained_fadd_f16_fpexcept_strict:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f16_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call half @llvm.experimental.constrained.fadd.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret half %val
+}
+
+define half @v_constained_fadd_f16_fpexcept_ignore(half %x, half %y) #0 {
+; GCN-LABEL: v_constained_fadd_f16_fpexcept_ignore:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f16_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call half @llvm.experimental.constrained.fadd.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
+  ret half %val
+}
+
+define half @v_constained_fadd_f16_fpexcept_maytrap(half %x, half %y) #0 {
+; GCN-LABEL: v_constained_fadd_f16_fpexcept_maytrap:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f16_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call half @llvm.experimental.constrained.fadd.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  ret half %val
+}
+
+define <2 x half> @v_constained_fadd_v2f16_fpexcept_strict(<2 x half> %x, <2 x half> %y) #0 {
+; GFX9-LABEL: v_constained_fadd_v2f16_fpexcept_strict:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_add_f16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_constained_fadd_v2f16_fpexcept_strict:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+  %val = call <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half> %x, <2 x half> %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <2 x half> %val
+}
+
+define <2 x half> @v_constained_fadd_v2f16_fpexcept_ignore(<2 x half> %x, <2 x half> %y) #0 {
+; GFX9-LABEL: v_constained_fadd_v2f16_fpexcept_ignore:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_add_f16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_constained_fadd_v2f16_fpexcept_ignore:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+  %val = call <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half> %x, <2 x half> %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
+  ret <2 x half> %val
+}
+
+define <2 x half> @v_constained_fadd_v2f16_fpexcept_maytrap(<2 x half> %x, <2 x half> %y) #0 {
+; GFX9-LABEL: v_constained_fadd_v2f16_fpexcept_maytrap:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_add_f16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_constained_fadd_v2f16_fpexcept_maytrap:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f16_sdwa v2, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v1
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+  %val = call <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half> %x, <2 x half> %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  ret <2 x half> %val
+}
+
+define <3 x half> @v_constained_fadd_v3f16_fpexcept_strict(<3 x half> %x, <3 x half> %y) #0 {
+; GFX9-LABEL: v_constained_fadd_v3f16_fpexcept_strict:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_add_f16 v0, v0, v2
+; GFX9-NEXT: v_add_f16_e32 v1, v1, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_constained_fadd_v3f16_fpexcept_strict:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_f16_sdwa v4, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; GFX8-NEXT: v_add_f16_e32 v0, v0, v2
+; GFX8-NEXT: v_or_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: v_add_f16_e32 v1, v1, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+  %val = call <3 x half> @llvm.experimental.constrained.fadd.v3f16(<3 x half> %x, <3 x half> %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <3 x half> %val
+}
+
+define amdgpu_ps half @s_constained_fadd_f16_fpexcept_strict(half inreg %x, half inreg %y) #0 {
+; GCN-LABEL: s_constained_fadd_f16_fpexcept_strict:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_mov_b32_e32 v0, s3
+; GCN-NEXT: v_add_f16_e32 v0, s2, v0
+; GCN-NEXT: ; return to shader part epilog
+  %val = call half @llvm.experimental.constrained.fadd.f16(half %x, half %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret half %val
+}
+
+define amdgpu_ps <2 x half> @s_constained_fadd_v2f16_fpexcept_strict(<2 x half> inreg %x, <2 x half> inreg %y) #0 {
+; GFX9-LABEL: s_constained_fadd_v2f16_fpexcept_strict:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v0, s3
+; GFX9-NEXT: v_pk_add_f16 v0, s2, v0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_constained_fadd_v2f16_fpexcept_strict:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshr_b32 s0, s3, 16
+; GFX8-NEXT: s_lshr_b32 s1, s2, 16
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: v_add_f16_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: v_add_f16_e32 v1, s2, v1
+; GFX8-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX8-NEXT: ; return to shader part epilog
+  %val = call <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half> %x, <2 x half> %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <2 x half> %val
+}
+
+declare half @llvm.experimental.constrained.fadd.f16(half, half, metadata, metadata) #1
+declare <2 x half> @llvm.experimental.constrained.fadd.v2f16(<2 x half>, <2 x half>, metadata, metadata) #1
+declare <3 x half> @llvm.experimental.constrained.fadd.v3f16(<3 x half>, <3 x half>, metadata, metadata) #1
+
+attributes #0 = { strictfp }
+attributes #1 = { inaccessiblememonly nounwind willreturn }
Index: llvm/test/CodeGen/AMDGPU/strict_fadd.f32.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/strict_fadd.f32.ll
@@ -0,0 +1,129 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN %s
+
+define float @v_constained_fadd_f32_fpexcept_strict(float %x, float %y) #0 {
+; GCN-LABEL: v_constained_fadd_f32_fpexcept_strict:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret float %val
+}
+
+define float @v_constained_fadd_f32_fpexcept_ignore(float %x, float %y) #0 {
+; GCN-LABEL: v_constained_fadd_f32_fpexcept_ignore:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
+  ret float %val
+}
+
+define float @v_constained_fadd_f32_fpexcept_maytrap(float %x, float %y) #0 {
+; GCN-LABEL: v_constained_fadd_f32_fpexcept_maytrap:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  ret float %val
+}
+
+define <2 x float> @v_constained_fadd_v2f32_fpexcept_strict(<2 x float> %x, <2 x float> %y) #0 {
+; GCN-LABEL: v_constained_fadd_v2f32_fpexcept_strict:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f32_e32 v0, v0, v2
+; GCN-NEXT: v_add_f32_e32 v1, v1, v3
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <2 x float> %val
+}
+
+define <2 x float> @v_constained_fadd_v2f32_fpexcept_ignore(<2 x float> %x, <2 x float> %y) #0 {
+; GCN-LABEL: v_constained_fadd_v2f32_fpexcept_ignore:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f32_e32 v0, v0, v2
+; GCN-NEXT: v_add_f32_e32 v1, v1, v3
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
+  ret <2 x float> %val
+}
+
+define <2 x float> @v_constained_fadd_v2f32_fpexcept_maytrap(<2 x float> %x, <2 x float> %y) #0 {
+; GCN-LABEL: v_constained_fadd_v2f32_fpexcept_maytrap:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f32_e32 v0, v0, v2
+; GCN-NEXT: v_add_f32_e32 v1, v1, v3
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %x, <2 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  ret <2 x float> %val
+}
+
+define <3 x float> @v_constained_fadd_v3f32_fpexcept_strict(<3 x float> %x, <3 x float> %y) #0 {
+; GCN-LABEL: v_constained_fadd_v3f32_fpexcept_strict:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f32_e32 v0, v0, v3
+; GCN-NEXT: v_add_f32_e32 v1, v1, v4
+; GCN-NEXT: v_add_f32_e32 v2, v2, v5
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call <3 x float> @llvm.experimental.constrained.fadd.v3f32(<3 x float> %x, <3 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <3 x float> %val
+}
+
+define amdgpu_ps float @s_constained_fadd_f32_fpexcept_strict(float inreg %x, float inreg %y) #0 {
+; GCN-LABEL: s_constained_fadd_f32_fpexcept_strict:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_mov_b32_e32 v0, s3
+; GCN-NEXT: v_add_f32_e32 v0, s2, v0
+; GCN-NEXT: ; return to shader part epilog
+  %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret float %val
+}
+
+define float @v_constained_fadd_f32_fpexcept_strict_fabs_lhs(float %x, float %y) #0 {
+; GCN-LABEL: v_constained_fadd_f32_fpexcept_strict_fabs_lhs:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f32_e64 v0, |v0|, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %fabs.x = call float @llvm.fabs.f32(float %x)
+  %val = call float @llvm.experimental.constrained.fadd.f32(float %fabs.x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret float %val
+}
+
+define float @v_constained_fadd_f32_fpexcept_strict_fabs_rhs(float %x, float %y) #0 {
+; GCN-LABEL: v_constained_fadd_f32_fpexcept_strict_fabs_rhs:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f32_e64 v0, v0, |v1|
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %fabs.y = call float @llvm.fabs.f32(float %y)
+  %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %fabs.y, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret float %val
+}
+
+define float @v_constained_fadd_f32_fpexcept_strict_fneg_fabs_lhs(float %x, float %y) #0 {
+; GCN-LABEL: v_constained_fadd_f32_fpexcept_strict_fneg_fabs_lhs:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f32_e64 v0, -|v0|, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %fabs.x = call float @llvm.fabs.f32(float %x)
+  %neg.fabs.x = fneg float %fabs.x
+  %val = call float @llvm.experimental.constrained.fadd.f32(float %neg.fabs.x, float %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret float %val
+}
+
+declare float @llvm.fabs.f32(float) #1
+declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata) #1
+declare <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float>, <2 x float>, metadata, metadata) #1
+declare <3 x float> @llvm.experimental.constrained.fadd.v3f32(<3 x float>, <3 x float>, metadata, metadata) #1
+
+attributes #0 = { strictfp }
+attributes #1 = { inaccessiblememonly nounwind willreturn }
Index: llvm/test/CodeGen/AMDGPU/strict_fadd.f64.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/AMDGPU/strict_fadd.f64.ll
@@ -0,0 +1,96 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=GCN %s
+
+define double @v_constained_fadd_f64_fpexcept_strict(double %x, double %y) #0 {
+; GCN-LABEL: v_constained_fadd_f64_fpexcept_strict:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call double @llvm.experimental.constrained.fadd.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret double %val
+}
+
+define double @v_constained_fadd_f64_fpexcept_ignore(double %x, double %y) #0 {
+; GCN-LABEL: v_constained_fadd_f64_fpexcept_ignore:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call double @llvm.experimental.constrained.fadd.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
+  ret double %val
+}
+
+define double @v_constained_fadd_f64_fpexcept_maytrap(double %x, double %y) #0 {
+; GCN-LABEL: v_constained_fadd_f64_fpexcept_maytrap:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], v[2:3]
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call double @llvm.experimental.constrained.fadd.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  ret double %val
+}
+
+define <2 x double> @v_constained_fadd_v2f64_fpexcept_strict(<2 x double> %x, <2 x double> %y) #0 {
+; GCN-LABEL: v_constained_fadd_v2f64_fpexcept_strict:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GCN-NEXT: v_add_f64 v[2:3], v[2:3], v[6:7]
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <2 x double> %val
+}
+
+define <2 x double> @v_constained_fadd_v2f64_fpexcept_ignore(<2 x double> %x, <2 x double> %y) #0 {
+; GCN-LABEL: v_constained_fadd_v2f64_fpexcept_ignore:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GCN-NEXT: v_add_f64 v[2:3], v[2:3], v[6:7]
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.ignore")
+  ret <2 x double> %val
+}
+
+define <2 x double> @v_constained_fadd_v2f64_fpexcept_maytrap(<2 x double> %x, <2 x double> %y) #0 {
+; GCN-LABEL: v_constained_fadd_v2f64_fpexcept_maytrap:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], v[4:5]
+; GCN-NEXT: v_add_f64 v[2:3], v[2:3], v[6:7]
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.maytrap")
+  ret <2 x double> %val
+}
+
+define <3 x double> @v_constained_fadd_v3f64_fpexcept_strict(<3 x double> %x, <3 x double> %y) #0 {
+; GCN-LABEL: v_constained_fadd_v3f64_fpexcept_strict:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f64 v[0:1], v[0:1], v[6:7]
+; GCN-NEXT: v_add_f64 v[2:3], v[2:3], v[8:9]
+; GCN-NEXT: v_add_f64 v[4:5], v[4:5], v[10:11]
+; GCN-NEXT: s_setpc_b64 s[30:31]
+  %val = call <3 x double> @llvm.experimental.constrained.fadd.v3f64(<3 x double> %x, <3 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  ret <3 x double> %val
+}
+
+define amdgpu_ps <2 x float> @s_constained_fadd_f64_fpexcept_strict(double inreg %x, double inreg %y) #0 {
+; GCN-LABEL: s_constained_fadd_f64_fpexcept_strict:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: v_mov_b32_e32 v1, s5
+; GCN-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GCN-NEXT: ; return to shader part epilog
+  %val = call double @llvm.experimental.constrained.fadd.f64(double %x, double %y, metadata !"round.tonearest", metadata !"fpexcept.strict")
+  %cast = bitcast double %val to <2 x float>
+  ret <2 x float> %cast
+}
+
+declare double @llvm.experimental.constrained.fadd.f64(double, double, metadata, metadata) #1
+declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata) #1
+declare <3 x double> @llvm.experimental.constrained.fadd.v3f64(<3 x double>, <3 x double>, metadata, metadata) #1
+
+attributes #0 = { strictfp }
+attributes #1 = { inaccessiblememonly nounwind willreturn }
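For reference, a minimal hand-written usage sketch (not part of the patch; the function name is hypothetical): the constrained call must live in a strictfp function, and the two metadata operands pick the rounding mode and exception behavior. The tests above only exercise round.tonearest; other modes such as round.towardzero go through the same intrinsic.

; Usage sketch, reusing the intrinsic signature declared in the tests above.
define float @example_strict_fadd_towardzero(float %x, float %y) strictfp {
  ; Same constrained fadd, but requesting round-toward-zero instead of round.tonearest.
  %sum = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.towardzero", metadata !"fpexcept.strict")
  ret float %sum
}

declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)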