Index: llvm/include/llvm/IR/IntrinsicsAMDGPU.td =================================================================== --- llvm/include/llvm/IR/IntrinsicsAMDGPU.td +++ llvm/include/llvm/IR/IntrinsicsAMDGPU.td @@ -345,12 +345,6 @@ def int_amdgcn_rsq_clamp : DefaultAttrsIntrinsic< [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]>; -// For int_amdgcn_ldexp_f16, only the low 16 bits of the i32 src1 operand will used. -def int_amdgcn_ldexp : DefaultAttrsIntrinsic< - [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty], - [IntrNoMem, IntrSpeculatable] ->; - def int_amdgcn_frexp_mant : DefaultAttrsIntrinsic< [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable] >; Index: llvm/lib/IR/AutoUpgrade.cpp =================================================================== --- llvm/lib/IR/AutoUpgrade.cpp +++ llvm/lib/IR/AutoUpgrade.cpp @@ -834,6 +834,13 @@ {F->getReturnType()}); return true; } + if (Name.startswith("amdgcn.ldexp")) { + // Target specific intrinsic became redundant + NewFn = Intrinsic::getDeclaration( + F->getParent(), Intrinsic::ldexp, + {F->getReturnType(), F->getArg(1)->getType()}); + return true; + } break; } Index: llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp +++ llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp @@ -3207,8 +3207,7 @@ case Intrinsic::amdgcn_rsq: case Intrinsic::amdgcn_rcp_legacy: case Intrinsic::amdgcn_rsq_legacy: - case Intrinsic::amdgcn_rsq_clamp: - case Intrinsic::amdgcn_ldexp: { + case Intrinsic::amdgcn_rsq_clamp: { // FIXME: This is probably wrong. If src is an sNaN, it won't be quieted SDValue Src = N->getOperand(1); return Src.isUndef() ? 
Src : SDValue(); Index: llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp =================================================================== --- llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp +++ llvm/lib/Target/AMDGPU/AMDGPUInstCombineIntrinsic.cpp @@ -954,50 +954,6 @@ break; } - case Intrinsic::amdgcn_ldexp: { - // FIXME: This doesn't introduce new instructions and belongs in - // InstructionSimplify. - Type *Ty = II.getType(); - Value *Op0 = II.getArgOperand(0); - Value *Op1 = II.getArgOperand(1); - - // Folding undef to qnan is safe regardless of the FP mode. - if (isa<UndefValue>(Op0)) { - auto *QNaN = ConstantFP::get(Ty, APFloat::getQNaN(Ty->getFltSemantics())); - return IC.replaceInstUsesWith(II, QNaN); - } - - const APFloat *C = nullptr; - match(Op0, PatternMatch::m_APFloat(C)); - - // FIXME: Should flush denorms depending on FP mode, but that's ignored - // everywhere else. - // - // These cases should be safe, even with strictfp. - // ldexp(0.0, x) -> 0.0 - // ldexp(-0.0, x) -> -0.0 - // ldexp(inf, x) -> inf - // ldexp(-inf, x) -> -inf - if (C && (C->isZero() || C->isInfinity())) { - return IC.replaceInstUsesWith(II, Op0); - } - - // With strictfp, be more careful about possibly needing to flush denormals - // or not, and snan behavior depends on ieee_mode. 
- if (II.isStrictFP()) - break; - - if (C && C->isNaN()) - return IC.replaceInstUsesWith(II, ConstantFP::get(Ty, C->makeQuiet())); - - // ldexp(x, 0) -> x - // ldexp(x, undef) -> x - if (isa<UndefValue>(Op1) || match(Op1, PatternMatch::m_ZeroInt())) { - return IC.replaceInstUsesWith(II, Op0); - } - - break; - } case Intrinsic::amdgcn_fmul_legacy: { Value *Op0 = II.getArgOperand(0); Value *Op1 = II.getArgOperand(1); Index: llvm/lib/Target/AMDGPU/SIISelLowering.cpp =================================================================== --- llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -7079,9 +7079,6 @@ return emitRemovedIntrinsicError(DAG, DL, VT); } - case Intrinsic::amdgcn_ldexp: - return DAG.getNode(ISD::FLDEXP, DL, VT, Op.getOperand(1), Op.getOperand(2)); - case Intrinsic::amdgcn_fract: return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1)); @@ -10435,7 +10432,6 @@ case Intrinsic::amdgcn_div_fmas: case Intrinsic::amdgcn_div_fixup: case Intrinsic::amdgcn_fract: - case Intrinsic::amdgcn_ldexp: case Intrinsic::amdgcn_cvt_pkrtz: case Intrinsic::amdgcn_cubeid: case Intrinsic::amdgcn_cubema: Index: llvm/test/Bitcode/amdgcn-ldexp.ll =================================================================== --- /dev/null +++ llvm/test/Bitcode/amdgcn-ldexp.ll @@ -0,0 +1,30 @@ +; RUN: llvm-as < %s | llvm-dis | FileCheck %s + +define float @f32(float %a, i32 %b) { + ; CHECK: %call = call float @llvm.ldexp.f32.i32(float %a, i32 %b) + ; CHECK-NOT: amdgcn.ldexp + %call = call float @llvm.amdgcn.ldexp.f32(float %a, i32 %b) + ret float %call +} + +define double @f64(double %a, i32 %b) { + ; CHECK: %call = call double @llvm.ldexp.f64.i32(double %a, i32 %b) + ; CHECK-NOT: amdgcn.ldexp + %call = call double @llvm.amdgcn.ldexp.f64(double %a, i32 %b) + ret double %call +} + +define half @f16(half %a, i32 %b) { + ; CHECK: %call = call half @llvm.ldexp.f16.i32(half %a, i32 %b) + ; CHECK-NOT: amdgcn.ldexp + %call = call half @llvm.amdgcn.ldexp.f16(half %a, 
i32 %b) + ret half %call +} + +declare half @llvm.amdgcn.ldexp.f16(half, i32) +declare float @llvm.amdgcn.ldexp.f32(float, i32) +declare double @llvm.amdgcn.ldexp.f64(double, i32) +; CHECK: declare half @llvm.ldexp.f16.i32(half, i32) +; CHECK: declare float @llvm.ldexp.f32.i32(float, i32) +; CHECK: declare double @llvm.ldexp.f64.i32(double, i32) +; CHECK-NOT: amdgcn.ldexp Index: llvm/test/CodeGen/AMDGPU/known-never-snan.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/known-never-snan.ll +++ llvm/test/CodeGen/AMDGPU/known-never-snan.ll @@ -516,7 +516,7 @@ ; GCN-NEXT: v_ldexp_f32 v0, v0, v1 ; GCN-NEXT: v_med3_f32 v0, v0, 2.0, 4.0 ; GCN-NEXT: s_setpc_b64 s[30:31] - %known.not.snan = call float @llvm.amdgcn.ldexp.f32(float %a, i32 %b) + %known.not.snan = call float @llvm.ldexp.f32.i32(float %a, i32 %b) %max = call float @llvm.maxnum.f32(float %known.not.snan, float 2.0) %med = call float @llvm.minnum.f32(float %max, float 4.0) ret float %med @@ -658,7 +658,7 @@ declare float @llvm.copysign.f32(float, float) #1 declare float @llvm.fma.f32(float, float, float) #1 declare float @llvm.fmuladd.f32(float, float, float) #1 -declare float @llvm.amdgcn.ldexp.f32(float, i32) #1 +declare float @llvm.ldexp.f32.i32(float, i32) #1 declare float @llvm.amdgcn.fmul.legacy(float, float) #1 declare float @llvm.amdgcn.fmed3.f32(float, float, float) #1 declare float @llvm.amdgcn.frexp.mant.f32(float) #1 Index: llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll +++ /dev/null @@ -1,211 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=VI %s -; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx1010 
-mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX10 %s -; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx1100 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX11 %s - -declare half @llvm.amdgcn.ldexp.f16(half %a, i32 %b) - -define amdgpu_kernel void @ldexp_f16( -; VI-LABEL: ldexp_f16: -; VI: ; %bb.0: -; VI-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 -; VI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34 -; VI-NEXT: s_mov_b32 s3, 0xf000 -; VI-NEXT: s_mov_b32 s2, -1 -; VI-NEXT: s_mov_b32 s14, s2 -; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_mov_b32 s12, s6 -; VI-NEXT: s_mov_b32 s13, s7 -; VI-NEXT: s_mov_b32 s15, s3 -; VI-NEXT: s_mov_b32 s10, s2 -; VI-NEXT: s_mov_b32 s11, s3 -; VI-NEXT: buffer_load_ushort v0, off, s[12:15], 0 -; VI-NEXT: buffer_load_dword v1, off, s[8:11], 0 -; VI-NEXT: s_mov_b32 s0, s4 -; VI-NEXT: s_mov_b32 s1, s5 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_ldexp_f16_e32 v0, v0, v1 -; VI-NEXT: buffer_store_short v0, off, s[0:3], 0 -; VI-NEXT: s_endpgm -; -; GFX10-LABEL: ldexp_f16: -; GFX10: ; %bb.0: -; GFX10-NEXT: s_clause 0x1 -; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24 -; GFX10-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x34 -; GFX10-NEXT: s_mov_b32 s2, -1 -; GFX10-NEXT: s_mov_b32 s3, 0x31016000 -; GFX10-NEXT: s_mov_b32 s14, s2 -; GFX10-NEXT: s_mov_b32 s15, s3 -; GFX10-NEXT: s_mov_b32 s10, s2 -; GFX10-NEXT: s_mov_b32 s11, s3 -; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_mov_b32 s12, s6 -; GFX10-NEXT: s_mov_b32 s13, s7 -; GFX10-NEXT: buffer_load_ushort v0, off, s[12:15], 0 -; GFX10-NEXT: buffer_load_dword v1, off, s[8:11], 0 -; GFX10-NEXT: s_mov_b32 s0, s4 -; GFX10-NEXT: s_mov_b32 s1, s5 -; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_ldexp_f16_e32 v0, v0, v1 -; GFX10-NEXT: buffer_store_short v0, off, s[0:3], 0 -; GFX10-NEXT: s_endpgm -; -; GFX11-LABEL: ldexp_f16: -; GFX11: ; %bb.0: -; GFX11-NEXT: s_clause 0x1 -; GFX11-NEXT: s_load_b128 s[4:7], s[0:1], 0x24 -; 
GFX11-NEXT: s_load_b64 s[0:1], s[0:1], 0x34 -; GFX11-NEXT: s_mov_b32 s10, -1 -; GFX11-NEXT: s_mov_b32 s11, 0x31016000 -; GFX11-NEXT: s_mov_b32 s14, s10 -; GFX11-NEXT: s_mov_b32 s15, s11 -; GFX11-NEXT: s_mov_b32 s2, s10 -; GFX11-NEXT: s_mov_b32 s3, s11 -; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: s_mov_b32 s12, s6 -; GFX11-NEXT: s_mov_b32 s13, s7 -; GFX11-NEXT: buffer_load_u16 v0, off, s[12:15], 0 -; GFX11-NEXT: buffer_load_b32 v1, off, s[0:3], 0 -; GFX11-NEXT: s_mov_b32 s8, s4 -; GFX11-NEXT: s_mov_b32 s9, s5 -; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_ldexp_f16_e32 v0, v0, v1 -; GFX11-NEXT: buffer_store_b16 v0, off, s[8:11], 0 -; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) -; GFX11-NEXT: s_endpgm - ptr addrspace(1) %r, - ptr addrspace(1) %a, - ptr addrspace(1) %b) { - %a.val = load half, ptr addrspace(1) %a - %b.val = load i32, ptr addrspace(1) %b - %r.val = call half @llvm.amdgcn.ldexp.f16(half %a.val, i32 %b.val) - store half %r.val, ptr addrspace(1) %r - ret void -} - -define amdgpu_kernel void @ldexp_f16_imm_a( -; VI-LABEL: ldexp_f16_imm_a: -; VI: ; %bb.0: -; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 -; VI-NEXT: s_mov_b32 s10, s6 -; VI-NEXT: s_mov_b32 s11, s7 -; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_mov_b32 s8, s2 -; VI-NEXT: s_mov_b32 s9, s3 -; VI-NEXT: buffer_load_dword v0, off, s[8:11], 0 -; VI-NEXT: s_mov_b32 s4, s0 -; VI-NEXT: s_mov_b32 s5, s1 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_ldexp_f16_e32 v0, 2.0, v0 -; VI-NEXT: buffer_store_short v0, off, s[4:7], 0 -; VI-NEXT: s_endpgm -; -; GFX10-LABEL: ldexp_f16_imm_a: -; GFX10: ; %bb.0: -; GFX10-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 -; GFX10-NEXT: s_mov_b32 s6, -1 -; GFX10-NEXT: s_mov_b32 s7, 0x31016000 -; GFX10-NEXT: s_mov_b32 s10, s6 -; GFX10-NEXT: s_mov_b32 s11, s7 -; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: s_mov_b32 s8, s2 -; GFX10-NEXT: s_mov_b32 s9, s3 -; GFX10-NEXT: s_mov_b32 s4, s0 -; GFX10-NEXT: 
buffer_load_dword v0, off, s[8:11], 0 -; GFX10-NEXT: s_mov_b32 s5, s1 -; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_ldexp_f16_e32 v0, 2.0, v0 -; GFX10-NEXT: buffer_store_short v0, off, s[4:7], 0 -; GFX10-NEXT: s_endpgm -; -; GFX11-LABEL: ldexp_f16_imm_a: -; GFX11: ; %bb.0: -; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x24 -; GFX11-NEXT: s_mov_b32 s6, -1 -; GFX11-NEXT: s_mov_b32 s7, 0x31016000 -; GFX11-NEXT: s_mov_b32 s10, s6 -; GFX11-NEXT: s_mov_b32 s11, s7 -; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: s_mov_b32 s8, s2 -; GFX11-NEXT: s_mov_b32 s9, s3 -; GFX11-NEXT: s_mov_b32 s4, s0 -; GFX11-NEXT: buffer_load_b32 v0, off, s[8:11], 0 -; GFX11-NEXT: s_mov_b32 s5, s1 -; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_ldexp_f16_e32 v0, 2.0, v0 -; GFX11-NEXT: buffer_store_b16 v0, off, s[4:7], 0 -; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) -; GFX11-NEXT: s_endpgm - ptr addrspace(1) %r, - ptr addrspace(1) %b) { - %b.val = load i32, ptr addrspace(1) %b - %r.val = call half @llvm.amdgcn.ldexp.f16(half 2.0, i32 %b.val) - store half %r.val, ptr addrspace(1) %r - ret void -} - -define amdgpu_kernel void @ldexp_f16_imm_b( -; VI-LABEL: ldexp_f16_imm_b: -; VI: ; %bb.0: -; VI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 -; VI-NEXT: s_mov_b32 s7, 0xf000 -; VI-NEXT: s_mov_b32 s6, -1 -; VI-NEXT: s_mov_b32 s10, s6 -; VI-NEXT: s_mov_b32 s11, s7 -; VI-NEXT: s_waitcnt lgkmcnt(0) -; VI-NEXT: s_mov_b32 s8, s2 -; VI-NEXT: s_mov_b32 s9, s3 -; VI-NEXT: buffer_load_ushort v0, off, s[8:11], 0 -; VI-NEXT: s_mov_b32 s4, s0 -; VI-NEXT: s_mov_b32 s5, s1 -; VI-NEXT: s_waitcnt vmcnt(0) -; VI-NEXT: v_ldexp_f16_e64 v0, v0, 2 -; VI-NEXT: buffer_store_short v0, off, s[4:7], 0 -; VI-NEXT: s_endpgm -; -; GFX10-LABEL: ldexp_f16_imm_b: -; GFX10: ; %bb.0: -; GFX10-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24 -; GFX10-NEXT: s_mov_b32 s6, -1 -; GFX10-NEXT: s_mov_b32 s7, 0x31016000 -; GFX10-NEXT: s_mov_b32 s10, s6 -; GFX10-NEXT: s_mov_b32 s11, s7 -; GFX10-NEXT: s_waitcnt lgkmcnt(0) -; GFX10-NEXT: 
s_mov_b32 s8, s2 -; GFX10-NEXT: s_mov_b32 s9, s3 -; GFX10-NEXT: s_mov_b32 s4, s0 -; GFX10-NEXT: buffer_load_ushort v0, off, s[8:11], 0 -; GFX10-NEXT: s_mov_b32 s5, s1 -; GFX10-NEXT: s_waitcnt vmcnt(0) -; GFX10-NEXT: v_ldexp_f16_e64 v0, v0, 2 -; GFX10-NEXT: buffer_store_short v0, off, s[4:7], 0 -; GFX10-NEXT: s_endpgm -; -; GFX11-LABEL: ldexp_f16_imm_b: -; GFX11: ; %bb.0: -; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x24 -; GFX11-NEXT: s_mov_b32 s6, -1 -; GFX11-NEXT: s_mov_b32 s7, 0x31016000 -; GFX11-NEXT: s_mov_b32 s10, s6 -; GFX11-NEXT: s_mov_b32 s11, s7 -; GFX11-NEXT: s_waitcnt lgkmcnt(0) -; GFX11-NEXT: s_mov_b32 s8, s2 -; GFX11-NEXT: s_mov_b32 s9, s3 -; GFX11-NEXT: s_mov_b32 s4, s0 -; GFX11-NEXT: buffer_load_u16 v0, off, s[8:11], 0 -; GFX11-NEXT: s_mov_b32 s5, s1 -; GFX11-NEXT: s_waitcnt vmcnt(0) -; GFX11-NEXT: v_ldexp_f16_e64 v0, v0, 2 -; GFX11-NEXT: buffer_store_b16 v0, off, s[4:7], 0 -; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS) -; GFX11-NEXT: s_endpgm - ptr addrspace(1) %r, - ptr addrspace(1) %a) { - %a.val = load half, ptr addrspace(1) %a - %r.val = call half @llvm.amdgcn.ldexp.f16(half %a.val, i32 2) - store half %r.val, ptr addrspace(1) %r - ret void -} Index: llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.ll =================================================================== --- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.ll +++ /dev/null @@ -1,31 +0,0 @@ -; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s -; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s - -declare float @llvm.amdgcn.ldexp.f32(float, i32) nounwind readnone -declare double @llvm.amdgcn.ldexp.f64(double, i32) nounwind readnone - -; SI-LABEL: {{^}}test_ldexp_f32: -; SI: v_ldexp_f32 -; SI: s_endpgm -define amdgpu_kernel void @test_ldexp_f32(ptr addrspace(1) %out, float %a, i32 %b) nounwind { - %result = call float @llvm.amdgcn.ldexp.f32(float %a, i32 %b) nounwind readnone - store float %result, ptr addrspace(1) 
%out, align 4 - ret void -} - -; SI-LABEL: {{^}}test_ldexp_f64: -; SI: v_ldexp_f64 -; SI: s_endpgm -define amdgpu_kernel void @test_ldexp_f64(ptr addrspace(1) %out, double %a, i32 %b) nounwind { - %result = call double @llvm.amdgcn.ldexp.f64(double %a, i32 %b) nounwind readnone - store double %result, ptr addrspace(1) %out, align 8 - ret void -} - -; SI-LABEL: {{^}}test_ldexp_undef_f32: -; SI-NOT: v_ldexp_f32 -define amdgpu_kernel void @test_ldexp_undef_f32(ptr addrspace(1) %out, i32 %b) nounwind { - %result = call float @llvm.amdgcn.ldexp.f32(float undef, i32 %b) nounwind readnone - store float %result, ptr addrspace(1) %out, align 4 - ret void -}