Index: lib/Target/AMDGPU/AMDGPUISelLowering.h
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -58,8 +58,9 @@
   SDValue LowerFROUND64(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerFLOG(SDValue Op, SelectionDAG &Dag,
+  SDValue LowerFLOG(SDValue Op, SelectionDAG &DAG,
                     double Log2BaseInverted) const;
+  SDValue lowerFEXP(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const;
Index: lib/Target/AMDGPU/AMDGPUISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -318,6 +318,7 @@
   setOperationAction(ISD::FLOG, MVT::f32, Custom);
   setOperationAction(ISD::FLOG10, MVT::f32, Custom);
+  setOperationAction(ISD::FEXP, MVT::f32, Custom);

   setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
@@ -450,6 +451,7 @@
     setOperationAction(ISD::FCOS, VT, Expand);
     setOperationAction(ISD::FDIV, VT, Expand);
     setOperationAction(ISD::FEXP2, VT, Expand);
+    setOperationAction(ISD::FEXP, VT, Expand);
     setOperationAction(ISD::FLOG2, VT, Expand);
     setOperationAction(ISD::FREM, VT, Expand);
     setOperationAction(ISD::FLOG, VT, Expand);
@@ -1143,6 +1145,8 @@
     return LowerFLOG(Op, DAG, 1 / AMDGPU_LOG2E_F);
   case ISD::FLOG10:
     return LowerFLOG(Op, DAG, AMDGPU_LN2_F / AMDGPU_LN10_F);
+  case ISD::FEXP:
+    return lowerFEXP(Op, DAG);
   case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
   case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
   case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
@@ -2219,6 +2223,34 @@
   return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand);
 }

+// Return M_LOG2E of appropriate type
+static SDValue getLog2EVal(SelectionDAG &DAG, const SDLoc &SL, EVT VT) {
+  switch (VT.getScalarType().getSimpleVT().SimpleTy) {
+  case MVT::f32:
+    return DAG.getConstantFP(1.44269504088896340735992468100189214f, SL, VT);
+  case MVT::f16:
+    return DAG.getConstantFP(
+      APFloat(APFloat::IEEEhalf(), "1.44269504088896340735992468100189214"),
+      SL, VT);
+  case MVT::f64:
+    return DAG.getConstantFP(
+      APFloat(APFloat::IEEEdouble(), "0x1.71547652b82fep+0"), SL, VT);
+  default:
+    llvm_unreachable("unsupported fp type");
+  }
+}
+
+// exp2(M_LOG2E_F * f);
+SDValue AMDGPUTargetLowering::lowerFEXP(SDValue Op, SelectionDAG &DAG) const {
+  EVT VT = Op.getValueType();
+  SDLoc SL(Op);
+  SDValue Src = Op.getOperand(0);
+
+  const SDValue K = getLog2EVal(DAG, SL, VT);
+  SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Src, K, Op->getFlags());
+  return DAG.getNode(ISD::FEXP2, SL, VT, Mul, Op->getFlags());
+}
+
 static bool isCtlzOpc(unsigned Opc) {
   return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
 }
Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -345,6 +345,7 @@
   if (Subtarget->has16BitInsts()) {
     setOperationAction(ISD::FLOG, MVT::f16, Custom);
+    setOperationAction(ISD::FEXP, MVT::f16, Custom);
     setOperationAction(ISD::FLOG10, MVT::f16, Custom);
   }

@@ -622,6 +623,7 @@
     setOperationAction(ISD::FMAXNUM, MVT::v4f16, Custom);
     setOperationAction(ISD::FCANONICALIZE, MVT::v4f16, Custom);
+    setOperationAction(ISD::FEXP, MVT::v2f16, Custom);
     setOperationAction(ISD::SELECT, MVT::v4i16, Custom);
     setOperationAction(ISD::SELECT, MVT::v4f16, Custom);
   }
Index: test/CodeGen/AMDGPU/fexp.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/fexp.ll
@@ -0,0 +1,302 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+;RUN: llc -mtriple=amdgcn-- < %s | FileCheck -enable-var-scope -check-prefix=SI %s
+;RUN: llc -mtriple=amdgcn-- -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefix=VI %s
+;RUN: llc -mtriple=amdgcn-- -mcpu=gfx900 < %s | FileCheck -enable-var-scope -check-prefix=GFX9 %s
+
+define float @v_exp_f32(float %arg0) {
+; SI-LABEL: v_exp_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
+; SI-NEXT: v_exp_f32_e32 v0, v0
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_exp_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
+; VI-NEXT: v_exp_f32_e32 v0, v0
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_exp_f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
+; GFX9-NEXT: v_exp_f32_e32 v0, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+  %result = call float @llvm.exp.f32(float %arg0)
+  ret float %result
+}
+
+define <2 x float> @v_exp_v2f32(<2 x float> %arg0) {
+; SI-LABEL: v_exp_v2f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, 0x3fb8aa3b
+; SI-NEXT: v_mul_f32_e32 v0, v0, v2
+; SI-NEXT: v_mul_f32_e32 v1, v1, v2
+; SI-NEXT: v_exp_f32_e32 v0, v0
+; SI-NEXT: v_exp_f32_e32 v1, v1
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_exp_v2f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, 0x3fb8aa3b
+; VI-NEXT: v_mul_f32_e32 v0, v0, v2
+; VI-NEXT: v_mul_f32_e32 v1, v1, v2
+; VI-NEXT: v_exp_f32_e32 v0, v0
+; VI-NEXT: v_exp_f32_e32 v1, v1
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_exp_v2f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x3fb8aa3b
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v2
+; GFX9-NEXT: v_mul_f32_e32 v1, v1, v2
+; GFX9-NEXT: v_exp_f32_e32 v0, v0
+; GFX9-NEXT: v_exp_f32_e32 v1, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+  %result = call <2 x float> @llvm.exp.v2f32(<2 x float> %arg0)
+  ret <2 x float> %result
+}
+
+define <3 x float> @v_exp_v3f32(<3 x float> %arg0) {
+; SI-LABEL: v_exp_v3f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, 0x3fb8aa3b
+; SI-NEXT: v_mul_f32_e32 v0, v0, v3
+; SI-NEXT: v_mul_f32_e32 v1, v1, v3
+; SI-NEXT: v_mul_f32_e32 v2, v2, v3
+; SI-NEXT: v_exp_f32_e32 v0, v0
+; SI-NEXT: v_exp_f32_e32 v1, v1
+; SI-NEXT: v_exp_f32_e32 v2, v2
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_exp_v3f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v3, 0x3fb8aa3b
+; VI-NEXT: v_mul_f32_e32 v0, v0, v3
+; VI-NEXT: v_mul_f32_e32 v1, v1, v3
+; VI-NEXT: v_mul_f32_e32 v2, v2, v3
+; VI-NEXT: v_exp_f32_e32 v0, v0
+; VI-NEXT: v_exp_f32_e32 v1, v1
+; VI-NEXT: v_exp_f32_e32 v2, v2
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_exp_v3f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v3, 0x3fb8aa3b
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v3
+; GFX9-NEXT: v_mul_f32_e32 v1, v1, v3
+; GFX9-NEXT: v_mul_f32_e32 v2, v2, v3
+; GFX9-NEXT: v_exp_f32_e32 v0, v0
+; GFX9-NEXT: v_exp_f32_e32 v1, v1
+; GFX9-NEXT: v_exp_f32_e32 v2, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+  %result = call <3 x float> @llvm.exp.v3f32(<3 x float> %arg0)
+  ret <3 x float> %result
+}
+
+define <4 x float> @v_exp_v4f32(<4 x float> %arg0) {
+; SI-LABEL: v_exp_v4f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v4, 0x3fb8aa3b
+; SI-NEXT: v_mul_f32_e32 v0, v0, v4
+; SI-NEXT: v_mul_f32_e32 v1, v1, v4
+; SI-NEXT: v_mul_f32_e32 v2, v2, v4
+; SI-NEXT: v_mul_f32_e32 v3, v3, v4
+; SI-NEXT: v_exp_f32_e32 v0, v0
+; SI-NEXT: v_exp_f32_e32 v1, v1
+; SI-NEXT: v_exp_f32_e32 v2, v2
+; SI-NEXT: v_exp_f32_e32 v3, v3
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_exp_v4f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v4, 0x3fb8aa3b
+; VI-NEXT: v_mul_f32_e32 v0, v0, v4
+; VI-NEXT: v_mul_f32_e32 v1, v1, v4
+; VI-NEXT: v_mul_f32_e32 v2, v2, v4
+; VI-NEXT: v_mul_f32_e32 v3, v3, v4
+; VI-NEXT: v_exp_f32_e32 v0, v0
+; VI-NEXT: v_exp_f32_e32 v1, v1
+; VI-NEXT: v_exp_f32_e32 v2, v2
+; VI-NEXT: v_exp_f32_e32 v3, v3
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_exp_v4f32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v4, 0x3fb8aa3b
+; GFX9-NEXT: v_mul_f32_e32 v0, v0, v4
+; GFX9-NEXT: v_mul_f32_e32 v1, v1, v4
+; GFX9-NEXT: v_mul_f32_e32 v2, v2, v4
+; GFX9-NEXT: v_mul_f32_e32 v3, v3, v4
+; GFX9-NEXT: v_exp_f32_e32 v0, v0
+; GFX9-NEXT: v_exp_f32_e32 v1, v1
+; GFX9-NEXT: v_exp_f32_e32 v2, v2
+; GFX9-NEXT: v_exp_f32_e32 v3, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+  %result = call <4 x float> @llvm.exp.v4f32(<4 x float> %arg0)
+  ret <4 x float> %result
+}
+
+define half @v_exp_f16(half %arg0) {
+; SI-LABEL: v_exp_f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v0, 0x3fb8aa3b, v0
+; SI-NEXT: v_exp_f32_e32 v0, v0
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_exp_f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_mul_f16_e32 v0, 0x3dc5, v0
+; VI-NEXT: v_exp_f16_e32 v0, v0
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_exp_f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mul_f16_e32 v0, 0x3dc5, v0
+; GFX9-NEXT: v_exp_f16_e32 v0, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+  %result = call half @llvm.exp.f16(half %arg0)
+  ret half %result
+}
+
+define <2 x half> @v_exp_v2f16(<2 x half> %arg0) {
+; SI-LABEL: v_exp_v2f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_mov_b32_e32 v2, 0x3fb8aa3b
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_mul_f32_e32 v0, v0, v2
+; SI-NEXT: v_mul_f32_e32 v1, v1, v2
+; SI-NEXT: v_exp_f32_e32 v0, v0
+; SI-NEXT: v_exp_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_exp_v2f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, 0x3dc5
+; VI-NEXT: v_mul_f16_sdwa v2, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT: v_mul_f16_e32 v0, v0, v1
+; VI-NEXT: v_exp_f16_sdwa v2, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; VI-NEXT: v_exp_f16_e32 v0, v0
+; VI-NEXT: v_or_b32_e32 v0, v0, v2
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_exp_v2f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v1, 0x3dc5
+; GFX9-NEXT: v_pk_mul_f16 v0, v0, v1 op_sel_hi:[1,0]
+; GFX9-NEXT: v_exp_f16_e32 v1, v0
+; GFX9-NEXT: v_exp_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+  %result = call <2 x half> @llvm.exp.v2f16(<2 x half> %arg0)
+  ret <2 x half> %result
+}
+
+; define <3 x half> @v_exp_v3f16(<3 x half> %arg0) {
+;   %result = call <3 x half> @llvm.exp.v3f16(<3 x half> %arg0)
+;   ret <3 x half> %result
+; }
+
+define <4 x half> @v_exp_v4f16(<4 x half> %arg0) {
+; SI-LABEL: v_exp_v4f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1
+; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_mov_b32_e32 v4, 0x3fb8aa3b
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_mul_f32_e32 v0, v0, v4
+; SI-NEXT: v_mul_f32_e32 v1, v1, v4
+; SI-NEXT: v_mul_f32_e32 v3, v3, v4
+; SI-NEXT: v_exp_f32_e32 v0, v0
+; SI-NEXT: v_mul_f32_e32 v2, v2, v4
+; SI-NEXT: v_exp_f32_e32 v1, v1
+; SI-NEXT: v_exp_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_exp_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT: v_or_b32_e32 v0, v0, v3
+; SI-NEXT: v_or_b32_e32 v1, v1, v2
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: v_exp_v4f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, 0x3dc5
+; VI-NEXT: v_mul_f16_e32 v3, v1, v2
+; VI-NEXT: v_mul_f16_e32 v4, v0, v2
+; VI-NEXT: v_mul_f16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT: v_mul_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; VI-NEXT: v_exp_f16_e32 v3, v3
+; VI-NEXT: v_exp_f16_sdwa v1, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; VI-NEXT: v_exp_f16_e32 v4, v4
+; VI-NEXT: v_exp_f16_sdwa v0, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
+; VI-NEXT: v_or_b32_e32 v1, v3, v1
+; VI-NEXT: v_or_b32_e32 v0, v4, v0
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_exp_v4f16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v2, 0x3dc5
+; GFX9-NEXT: v_mul_f16_e32 v3, v1, v2
+; GFX9-NEXT: v_mul_f16_e32 v4, v0, v2
+; GFX9-NEXT: v_mul_f16_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_mul_f16_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
+; GFX9-NEXT: v_exp_f16_e32 v3, v3
+; GFX9-NEXT: v_exp_f16_e32 v4, v4
+; GFX9-NEXT: v_exp_f16_e32 v0, v0
+; GFX9-NEXT: v_exp_f16_e32 v1, v1
+; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff
+; GFX9-NEXT: v_and_b32_e32 v4, v2, v4
+; GFX9-NEXT: v_and_b32_e32 v2, v2, v3
+; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v4
+; GFX9-NEXT: v_lshl_or_b32 v1, v1, 16, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+  %result = call <4 x half> @llvm.exp.v4f16(<4 x half> %arg0)
+  ret <4 x half> %result
+}
+
+declare float @llvm.exp.f32(float)
+declare <2 x float> @llvm.exp.v2f32(<2 x float>)
+declare <3 x float> @llvm.exp.v3f32(<3 x float>)
+declare <4 x float> @llvm.exp.v4f32(<4 x float>)
+
+declare half @llvm.exp.f16(half)
+declare <2 x half> @llvm.exp.v2f16(<2 x half>)
+declare <3 x half> @llvm.exp.v3f16(<3 x half>)
+declare <4 x half> @llvm.exp.v4f16(<4 x half>)
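
Note: the custom lowering added above implements the identity exp(x) = exp2(x * log2(e)); lowerFEXP() multiplies the source by the constant from getLog2EVal() and emits an ISD::FEXP2 node, which selects to the v_exp_* hardware instructions checked in the tests. A minimal IR-level sketch of the same rewrite, for illustration only (the function name @exp_via_exp2 is invented here, and the patch itself operates on SelectionDAG nodes, not IR):

  ; exp(x) rewritten as exp2(x * log2(e)). 0x3FF7154760000000 is the IEEE
  ; double encoding of log2(e) rounded to float precision, i.e. the same
  ; value as the 0x3fb8aa3b bit pattern matched in the checks above.
  define float @exp_via_exp2(float %x) {
    %scaled = fmul float %x, 0x3FF7154760000000
    %r = call float @llvm.exp2.f32(float %scaled)
    ret float %r
  }
  declare float @llvm.exp2.f32(float)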