Index: lib/Target/AMDGPU/SIFoldOperands.cpp
===================================================================
--- lib/Target/AMDGPU/SIFoldOperands.cpp
+++ lib/Target/AMDGPU/SIFoldOperands.cpp
@@ -79,6 +79,9 @@
   const MachineOperand *isClamp(const MachineInstr &MI) const;
   bool tryFoldClamp(MachineInstr &MI);
 
+  std::pair<const MachineOperand *, int> isOMod(const MachineInstr &MI) const;
+  bool tryFoldOMod(MachineInstr &MI);
+
 public:
   SIFoldOperands() : MachineFunctionPass(ID) {
     initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
@@ -135,7 +138,7 @@
   return new SIFoldOperands();
 }
 
-static bool isSafeToFold(const MachineInstr &MI) {
+static bool isFoldableCopy(const MachineInstr &MI) {
   switch (MI.getOpcode()) {
   case AMDGPU::V_MOV_B32_e32:
   case AMDGPU::V_MOV_B32_e64:
@@ -730,7 +733,6 @@
   return true;
 }
 
-// FIXME: Does this need to check IEEE bit on function?
 bool SIFoldOperands::tryFoldClamp(MachineInstr &MI) {
   const MachineOperand *ClampSrc = isClamp(MI);
   if (!ClampSrc || !hasOneNonDBGUseInst(*MRI, ClampSrc->getReg()))
@@ -752,6 +754,128 @@
   return true;
 }
 
+static int getOModValue(unsigned Opc, int64_t Val) {
+  switch (Opc) {
+  case AMDGPU::V_MUL_F32_e64: {
+    switch (static_cast<uint32_t>(Val)) {
+    case 0x3f000000: // 0.5
+      return SIOutMods::DIV2;
+    case 0x40000000: // 2.0
+      return SIOutMods::MUL2;
+    case 0x40800000: // 4.0
+      return SIOutMods::MUL4;
+    default:
+      return SIOutMods::NONE;
+    }
+  }
+  case AMDGPU::V_MUL_F16_e64: {
+    switch (static_cast<uint16_t>(Val)) {
+    case 0x3800: // 0.5
+      return SIOutMods::DIV2;
+    case 0x4000: // 2.0
+      return SIOutMods::MUL2;
+    case 0x4400: // 4.0
+      return SIOutMods::MUL4;
+    default:
+      return SIOutMods::NONE;
+    }
+  }
+  default:
+    llvm_unreachable("invalid mul opcode");
+  }
+}
+
+// FIXME: Does this really not support denormals with f16?
+// FIXME: Does this need to check IEEE mode bit? SNaNs are generally not
+// handled, so will anything other than that break?
+std::pair<const MachineOperand *, int>
+SIFoldOperands::isOMod(const MachineInstr &MI) const {
+  unsigned Op = MI.getOpcode();
+  switch (Op) {
+  case AMDGPU::V_MUL_F32_e64:
+  case AMDGPU::V_MUL_F16_e64: {
+    // XXX - Do denormals work with f16 omod?
+    if ((ST->hasFP32Denormals() && Op == AMDGPU::V_MUL_F32_e64) ||
+        (ST->hasFP16Denormals() && Op == AMDGPU::V_MUL_F16_e64))
+      return std::make_pair(nullptr, SIOutMods::NONE);
+
+    const MachineOperand *RegOp = nullptr;
+    const MachineOperand *ImmOp = nullptr;
+    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
+    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
+    if (Src0->isImm()) {
+      ImmOp = Src0;
+      RegOp = Src1;
+    } else if (Src1->isImm()) {
+      ImmOp = Src1;
+      RegOp = Src0;
+    } else
+      return std::make_pair(nullptr, SIOutMods::NONE);
+
+    int OMod = getOModValue(Op, ImmOp->getImm());
+    if (OMod == SIOutMods::NONE ||
+        TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
+        TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
+        TII->hasModifiersSet(MI, AMDGPU::OpName::omod) ||
+        TII->hasModifiersSet(MI, AMDGPU::OpName::clamp))
+      return std::make_pair(nullptr, SIOutMods::NONE);
+
+    return std::make_pair(RegOp, OMod);
+  }
+  case AMDGPU::V_ADD_F32_e64:
+  case AMDGPU::V_ADD_F16_e64: {
+    // XXX - Do denormals work with f16 omod?
+    if ((ST->hasFP32Denormals() && Op == AMDGPU::V_ADD_F32_e64) ||
+        (ST->hasFP16Denormals() && Op == AMDGPU::V_ADD_F16_e64))
+      return std::make_pair(nullptr, SIOutMods::NONE);
+
+    // Look through the DAGCombiner canonicalization fmul x, 2 -> fadd x, x
+    const MachineOperand *Src0 = TII->getNamedOperand(MI, AMDGPU::OpName::src0);
+    const MachineOperand *Src1 = TII->getNamedOperand(MI, AMDGPU::OpName::src1);
+
+    if (Src0->isReg() && Src1->isReg() && Src0->getReg() == Src1->getReg() &&
+        Src0->getSubReg() == Src1->getSubReg() &&
+        !TII->hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) &&
+        !TII->hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) &&
+        !TII->hasModifiersSet(MI, AMDGPU::OpName::clamp) &&
+        !TII->hasModifiersSet(MI, AMDGPU::OpName::omod))
+      return std::make_pair(Src0, SIOutMods::MUL2);
+
+    return std::make_pair(nullptr, SIOutMods::NONE);
+  }
+  default:
+    return std::make_pair(nullptr, SIOutMods::NONE);
+  }
+}
+
+// FIXME: Does this need to check IEEE bit on function?
+bool SIFoldOperands::tryFoldOMod(MachineInstr &MI) {
+  const MachineOperand *RegOp;
+  int OMod;
+  std::tie(RegOp, OMod) = isOMod(MI);
+  if (OMod == SIOutMods::NONE || !RegOp->isReg() ||
+      RegOp->getSubReg() != AMDGPU::NoSubRegister ||
+      !hasOneNonDBGUseInst(*MRI, RegOp->getReg()))
+    return false;
+
+  MachineInstr *Def = MRI->getVRegDef(RegOp->getReg());
+  MachineOperand *DefOMod = TII->getNamedOperand(*Def, AMDGPU::OpName::omod);
+  if (!DefOMod || DefOMod->getImm() != SIOutMods::NONE)
+    return false;
+
+  // Clamp is applied after omod. If the source already has clamp set, don't
+  // fold it.
+  if (TII->hasModifiersSet(*Def, AMDGPU::OpName::clamp))
+    return false;
+
+  DEBUG(dbgs() << "Folding omod " << *DefOMod << " into " << *Def << '\n');
+
+  DefOMod->setImm(OMod);
+  MRI->replaceRegWith(MI.getOperand(0).getReg(), Def->getOperand(0).getReg());
+  MI.eraseFromParent();
+  return true;
+}
+
 bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
   if (skipFunction(*MF.getFunction()))
     return false;
@@ -770,9 +894,9 @@
       Next = std::next(I);
       MachineInstr &MI = *I;
 
-      if (!isSafeToFold(MI)) {
-        // TODO: Try omod also.
-        tryFoldClamp(MI);
+      if (!isFoldableCopy(MI)) {
+        if (!tryFoldOMod(MI))
+          tryFoldClamp(MI);
         continue;
       }
Index: test/CodeGen/AMDGPU/clamp-omod-special-case.mir
===================================================================
--- test/CodeGen/AMDGPU/clamp-omod-special-case.mir
+++ test/CodeGen/AMDGPU/clamp-omod-special-case.mir
@@ -8,6 +8,22 @@
     ret void
   }
 
+  define amdgpu_kernel void @v_omod_mul_omod_already_set_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) {
+    ret void
+  }
+
+  define amdgpu_kernel void @v_omod_mul_clamp_already_set_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) {
+    ret void
+  }
+
+  define amdgpu_kernel void @v_omod_add_omod_already_set_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) {
+    ret void
+  }
+
+  define amdgpu_kernel void @v_omod_add_clamp_already_set_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) {
+    ret void
+  }
+
 ...
 ---
 # GCN-LABEL: name: v_max_self_clamp_not_set_f32
@@ -133,3 +149,261 @@
     BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
     S_ENDPGM
 ...
+---
+# Don't fold a mul that looks like an omod if the mul itself already has omod set.
+
+# GCN-LABEL: name: v_omod_mul_omod_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit %exec
+name: v_omod_mul_omod_already_set_f32
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: sgpr_64 }
+  - { id: 1, class: sreg_32_xm0 }
+  - { id: 2, class: sgpr_32 }
+  - { id: 3, class: vgpr_32 }
+  - { id: 4, class: sreg_64_xexec }
+  - { id: 5, class: sreg_64_xexec }
+  - { id: 6, class: sreg_32 }
+  - { id: 7, class: sreg_32 }
+  - { id: 8, class: sreg_32_xm0 }
+  - { id: 9, class: sreg_64 }
+  - { id: 10, class: sreg_32_xm0 }
+  - { id: 11, class: sreg_32_xm0 }
+  - { id: 12, class: sgpr_64 }
+  - { id: 13, class: sgpr_128 }
+  - { id: 14, class: sreg_32_xm0 }
+  - { id: 15, class: sreg_64 }
+  - { id: 16, class: sgpr_128 }
+  - { id: 17, class: vgpr_32 }
+  - { id: 18, class: vreg_64 }
+  - { id: 19, class: vgpr_32 }
+  - { id: 20, class: vgpr_32 }
+  - { id: 21, class: vgpr_32 }
+  - { id: 22, class: vgpr_32 }
+  - { id: 23, class: vreg_64 }
+  - { id: 24, class: vgpr_32 }
+  - { id: 25, class: vreg_64 }
+  - { id: 26, class: vreg_64 }
+liveins:
+  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+  bb.0 (%ir-block.0):
+    liveins: %sgpr0_sgpr1, %vgpr0
+
+    %3 = COPY %vgpr0
+    %0 = COPY %sgpr0_sgpr1
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %25 = REG_SEQUENCE %3, 1, %24, 2
+    %10 = S_MOV_B32 61440
+    %11 = S_MOV_B32 0
+    %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+    %13 = REG_SEQUENCE killed %5, 17, %12, 18
+    %14 = S_MOV_B32 2
+    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %16 = REG_SEQUENCE killed %4, 17, %12, 18
+    %18 = COPY %26
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    S_ENDPGM
+
+...
+---
+# Don't fold a mul that looks like an omod if the mul itself already has clamp set.
+# This might be OK, but would require folding the clamp at the same time.
+
+# GCN-LABEL: name: v_omod_mul_clamp_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit %exec
+
+name: v_omod_mul_clamp_already_set_f32
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: sgpr_64 }
+  - { id: 1, class: sreg_32_xm0 }
+  - { id: 2, class: sgpr_32 }
+  - { id: 3, class: vgpr_32 }
+  - { id: 4, class: sreg_64_xexec }
+  - { id: 5, class: sreg_64_xexec }
+  - { id: 6, class: sreg_32 }
+  - { id: 7, class: sreg_32 }
+  - { id: 8, class: sreg_32_xm0 }
+  - { id: 9, class: sreg_64 }
+  - { id: 10, class: sreg_32_xm0 }
+  - { id: 11, class: sreg_32_xm0 }
+  - { id: 12, class: sgpr_64 }
+  - { id: 13, class: sgpr_128 }
+  - { id: 14, class: sreg_32_xm0 }
+  - { id: 15, class: sreg_64 }
+  - { id: 16, class: sgpr_128 }
+  - { id: 17, class: vgpr_32 }
+  - { id: 18, class: vreg_64 }
+  - { id: 19, class: vgpr_32 }
+  - { id: 20, class: vgpr_32 }
+  - { id: 21, class: vgpr_32 }
+  - { id: 22, class: vgpr_32 }
+  - { id: 23, class: vreg_64 }
+  - { id: 24, class: vgpr_32 }
+  - { id: 25, class: vreg_64 }
+  - { id: 26, class: vreg_64 }
+liveins:
+  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+  bb.0 (%ir-block.0):
+    liveins: %sgpr0_sgpr1, %vgpr0
+
+    %3 = COPY %vgpr0
+    %0 = COPY %sgpr0_sgpr1
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %25 = REG_SEQUENCE %3, 1, %24, 2
+    %10 = S_MOV_B32 61440
+    %11 = S_MOV_B32 0
+    %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+    %13 = REG_SEQUENCE killed %5, 17, %12, 18
+    %14 = S_MOV_B32 2
+    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %16 = REG_SEQUENCE killed %4, 17, %12, 18
+    %18 = COPY %26
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    S_ENDPGM
+
+...
+---
+# Don't fold an add that looks like an omod (the canonicalized fmul x, 2.0) if the add itself already has omod set.
+
+# GCN-LABEL: name: v_omod_add_omod_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit %exec
+name: v_omod_add_omod_already_set_f32
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: sgpr_64 }
+  - { id: 1, class: sreg_32_xm0 }
+  - { id: 2, class: sgpr_32 }
+  - { id: 3, class: vgpr_32 }
+  - { id: 4, class: sreg_64_xexec }
+  - { id: 5, class: sreg_64_xexec }
+  - { id: 6, class: sreg_32 }
+  - { id: 7, class: sreg_32 }
+  - { id: 8, class: sreg_32_xm0 }
+  - { id: 9, class: sreg_64 }
+  - { id: 10, class: sreg_32_xm0 }
+  - { id: 11, class: sreg_32_xm0 }
+  - { id: 12, class: sgpr_64 }
+  - { id: 13, class: sgpr_128 }
+  - { id: 14, class: sreg_32_xm0 }
+  - { id: 15, class: sreg_64 }
+  - { id: 16, class: sgpr_128 }
+  - { id: 17, class: vgpr_32 }
+  - { id: 18, class: vreg_64 }
+  - { id: 19, class: vgpr_32 }
+  - { id: 20, class: vgpr_32 }
+  - { id: 21, class: vgpr_32 }
+  - { id: 22, class: vgpr_32 }
+  - { id: 23, class: vreg_64 }
+  - { id: 24, class: vgpr_32 }
+  - { id: 25, class: vreg_64 }
+  - { id: 26, class: vreg_64 }
+liveins:
+  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+  bb.0 (%ir-block.0):
+    liveins: %sgpr0_sgpr1, %vgpr0
+
+    %3 = COPY %vgpr0
+    %0 = COPY %sgpr0_sgpr1
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %25 = REG_SEQUENCE %3, 1, %24, 2
+    %10 = S_MOV_B32 61440
+    %11 = S_MOV_B32 0
+    %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+    %13 = REG_SEQUENCE killed %5, 17, %12, 18
+    %14 = S_MOV_B32 2
+    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %16 = REG_SEQUENCE killed %4, 17, %12, 18
+    %18 = COPY %26
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    S_ENDPGM
+
+...
+---
+# Don't fold an add that looks like an omod (the canonicalized fmul x, 2.0) if the add itself already has clamp set.
+# This might be OK, but would require folding the clamp at the same time.
+
+# GCN-LABEL: name: v_omod_add_clamp_already_set_f32
+# GCN: %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+# GCN-NEXT: %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit %exec
+
+name: v_omod_add_clamp_already_set_f32
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: sgpr_64 }
+  - { id: 1, class: sreg_32_xm0 }
+  - { id: 2, class: sgpr_32 }
+  - { id: 3, class: vgpr_32 }
+  - { id: 4, class: sreg_64_xexec }
+  - { id: 5, class: sreg_64_xexec }
+  - { id: 6, class: sreg_32 }
+  - { id: 7, class: sreg_32 }
+  - { id: 8, class: sreg_32_xm0 }
+  - { id: 9, class: sreg_64 }
+  - { id: 10, class: sreg_32_xm0 }
+  - { id: 11, class: sreg_32_xm0 }
+  - { id: 12, class: sgpr_64 }
+  - { id: 13, class: sgpr_128 }
+  - { id: 14, class: sreg_32_xm0 }
+  - { id: 15, class: sreg_64 }
+  - { id: 16, class: sgpr_128 }
+  - { id: 17, class: vgpr_32 }
+  - { id: 18, class: vreg_64 }
+  - { id: 19, class: vgpr_32 }
+  - { id: 20, class: vgpr_32 }
+  - { id: 21, class: vgpr_32 }
+  - { id: 22, class: vgpr_32 }
+  - { id: 23, class: vreg_64 }
+  - { id: 24, class: vgpr_32 }
+  - { id: 25, class: vreg_64 }
+  - { id: 26, class: vreg_64 }
+liveins:
+  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '%vgpr0', virtual-reg: '%3' }
+body: |
+  bb.0 (%ir-block.0):
+    liveins: %sgpr0_sgpr1, %vgpr0
+
+    %3 = COPY %vgpr0
+    %0 = COPY %sgpr0_sgpr1
+    %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %25 = REG_SEQUENCE %3, 1, %24, 2
+    %10 = S_MOV_B32 61440
+    %11 = S_MOV_B32 0
+    %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
+    %13 = REG_SEQUENCE killed %5, 17, %12, 18
+    %14 = S_MOV_B32 2
+    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %16 = REG_SEQUENCE killed %4, 17, %12, 18
+    %18 = COPY %26
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
+    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    S_ENDPGM
+
+...
Index: test/CodeGen/AMDGPU/omod.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/omod.ll
@@ -0,0 +1,336 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}v_omod_div2_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e64 v{{[0-9]+}}, [[A]], 1.0 div:2{{$}}
+define amdgpu_kernel void @v_omod_div2_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 0.5
+  store float %div2, float addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mul2_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e64 v{{[0-9]+}}, [[A]], 1.0 mul:2{{$}}
+define amdgpu_kernel void @v_omod_mul2_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %add = fadd float %a, 1.0
+  %mul2 = fmul float %add, 2.0
+  store float %mul2, float addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mul4_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e64 v{{[0-9]+}}, [[A]], 1.0 mul:4{{$}}
+define amdgpu_kernel void @v_omod_mul4_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %add = fadd float %a, 1.0
+  %mul4 = fmul float %add, 4.0
+  store float %mul4, float addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mul4_multi_use_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 4.0, [[ADD]]{{$}}
+define amdgpu_kernel void @v_omod_mul4_multi_use_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %add = fadd float %a, 1.0
+  %mul4 = fmul float %add, 4.0
+  store float %mul4, float addrspace(1)* %out.gep
+  store volatile float %add, float addrspace(1)* undef
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_mul4_dbg_use_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e64 v{{[0-9]+}}, [[A]], 1.0 mul:4{{$}}
+define amdgpu_kernel void @v_omod_mul4_dbg_use_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %add = fadd float %a, 1.0
+  call void @llvm.dbg.value(metadata float %add, i64 0, metadata !4, metadata !9), !dbg !10
+  %mul4 = fmul float %add, 4.0
+  store float %mul4, float addrspace(1)* %out.gep
+  ret void
+}
+
+; Clamp is applied after omod; folding both into the instruction is OK.
+; GCN-LABEL: {{^}}v_clamp_omod_div2_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e64 v{{[0-9]+}}, [[A]], 1.0 clamp div:2{{$}}
+define amdgpu_kernel void @v_clamp_omod_div2_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 0.5
+
+  %max = call float @llvm.maxnum.f32(float %div2, float 0.0)
+  %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+  store float %clamp, float addrspace(1)* %out.gep
+  ret void
+}
+
+; Cannot fold omod into clamp
+; GCN-LABEL: {{^}}v_omod_div2_clamp_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e64 [[ADD:v[0-9]+]], [[A]], 1.0 clamp{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_kernel void @v_omod_div2_clamp_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %add = fadd float %a, 1.0
+  %max = call float @llvm.maxnum.f32(float %add, float 0.0)
+  %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+  %div2 = fmul float %clamp, 0.5
+  store float %div2, float addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_div2_abs_src_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}}
+; GCN: v_mul_f32_e64 v{{[0-9]+}}, |[[ADD]]|, 0.5{{$}}
+define amdgpu_kernel void @v_omod_div2_abs_src_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %add = fadd float %a, 1.0
+  %abs.add = call float @llvm.fabs.f32(float %add)
+  %div2 = fmul float %abs.add, 0.5
+  store float %div2, float addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_self_clamp_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e64 v{{[0-9]+}}, [[A]], [[A]] clamp{{$}}
+define amdgpu_kernel void @v_omod_add_self_clamp_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %add = fadd float %a, %a
+  %max = call float @llvm.maxnum.f32(float %add, float 0.0)
+  %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+  store float %clamp, float addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_clamp_self_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_max_f32_e64 [[CLAMP:v[0-9]+]], [[A]], [[A]] clamp{{$}}
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[CLAMP]], [[CLAMP]]{{$}}
+define amdgpu_kernel void @v_omod_add_clamp_self_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %max = call float @llvm.maxnum.f32(float %a, float 0.0)
+  %clamp = call float @llvm.minnum.f32(float %max, float 1.0)
+  %add = fadd float %clamp, %clamp
+  store float %add, float addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_abs_self_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e32 [[X:v[0-9]+]], 1.0, [[A]]
+; GCN: v_add_f32_e64 v{{[0-9]+}}, |[[X]]|, |[[X]]|{{$}}
+define amdgpu_kernel void @v_omod_add_abs_self_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %x = fadd float %a, 1.0
+  %abs.x = call float @llvm.fabs.f32(float %x)
+  %add = fadd float %abs.x, %abs.x
+  store float %add, float addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_abs_x_x_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e32 [[X:v[0-9]+]], 1.0, [[A]]
+; GCN: v_add_f32_e64 v{{[0-9]+}}, |[[X]]|, [[X]]{{$}}
+define amdgpu_kernel void @v_omod_add_abs_x_x_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %x = fadd float %a, 1.0
+  %abs.x = call float @llvm.fabs.f32(float %x)
+  %add = fadd float %abs.x, %x
+  store float %add, float addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_add_x_abs_x_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e32 [[X:v[0-9]+]], 1.0, [[A]]
+; GCN: v_add_f32_e64 v{{[0-9]+}}, [[X]], |[[X]]|{{$}}
+define amdgpu_kernel void @v_omod_add_x_abs_x_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %x = fadd float %a, 1.0
+  %abs.x = call float @llvm.fabs.f32(float %x)
+  %add = fadd float %x, %abs.x
+  store float %add, float addrspace(1)* %out.gep
+  ret void
+}
+
+; Don't fold an omod into an instruction that already has omod set.
+; GCN-LABEL: {{^}}v_omod_div2_omod_div2_f32:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e64 [[ADD:v[0-9]+]], [[A]], 1.0 div:2{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_kernel void @v_omod_div2_omod_div2_f32(float addrspace(1)* %out, float addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %add = fadd float %a, 1.0
+  %div2.0 = fmul float %add, 0.5
+  %div2.1 = fmul float %div2.0, 0.5
+  store float %div2.1, float addrspace(1)* %out.gep
+  ret void
+}
+
+; Don't fold omod if denorms enabled
+; GCN-LABEL: {{^}}v_omod_div2_f32_denormals:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}}
+; GCN: v_mul_f32_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_kernel void @v_omod_div2_f32_denormals(float addrspace(1)* %out, float addrspace(1)* %aptr) #2 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %add = fadd float %a, 1.0
+  %div2 = fmul float %add, 0.5
+  store float %div2, float addrspace(1)* %out.gep
+  ret void
+}
+
+; Don't fold omod if denorms enabled for add form.
+; GCN-LABEL: {{^}}v_omod_mul2_f32_denormals:
+; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
+; GCN: v_add_f32_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}}
+; GCN: v_add_f32_e32 v{{[0-9]+}}, [[ADD]], [[ADD]]{{$}}
+define amdgpu_kernel void @v_omod_mul2_f32_denormals(float addrspace(1)* %out, float addrspace(1)* %aptr) #2 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr float, float addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
+  %a = load float, float addrspace(1)* %gep0
+  %add = fadd float %a, 1.0
+  %mul2 = fadd float %add, %add
+  store float %mul2, float addrspace(1)* %out.gep
+  ret void
+}
+
+; Don't fold omod if denorms enabled
+; GCN-LABEL: {{^}}v_omod_div2_f16_denormals:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; VI: v_add_f16_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}}
+; VI: v_mul_f16_e32 v{{[0-9]+}}, 0.5, [[ADD]]{{$}}
+define amdgpu_kernel void @v_omod_div2_f16_denormals(half addrspace(1)* %out, half addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr half, half addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr half, half addrspace(1)* %out, i32 %tid
+  %a = load half, half addrspace(1)* %gep0
+  %add = fadd half %a, 1.0
+  %div2 = fmul half %add, 0.5
+  store half %div2, half addrspace(1)* %out.gep
+  ret void
+}
+
+; Don't fold omod if denorms enabled for add form.
+; GCN-LABEL: {{^}}v_omod_mul2_f16_denormals:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; VI: v_add_f16_e32 [[ADD:v[0-9]+]], 1.0, [[A]]{{$}}
+; VI: v_add_f16_e32 v{{[0-9]+}}, [[ADD]], [[ADD]]{{$}}
+define amdgpu_kernel void @v_omod_mul2_f16_denormals(half addrspace(1)* %out, half addrspace(1)* %aptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr half, half addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr half, half addrspace(1)* %out, i32 %tid
+  %a = load half, half addrspace(1)* %gep0
+  %add = fadd half %a, 1.0
+  %mul2 = fadd half %add, %add
+  store half %mul2, half addrspace(1)* %out.gep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_omod_div2_f16_no_denormals:
+; GCN: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
+; VI-NOT: [[A]]
+; VI: v_add_f16_e64 [[ADD:v[0-9]+]], [[A]], 1.0 div:2{{$}}
+define amdgpu_kernel void @v_omod_div2_f16_no_denormals(half addrspace(1)* %out, half addrspace(1)* %aptr) #3 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr half, half addrspace(1)* %aptr, i32 %tid
+  %out.gep = getelementptr half, half addrspace(1)* %out, i32 %tid
+  %a = load half, half addrspace(1)* %gep0
+  %add = fadd half %a, 1.0
+  %div2 = fmul half %add, 0.5
+  store half %div2, half addrspace(1)* %out.gep
+  ret void
+}
+
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+declare float @llvm.fabs.f32(float) #1
+declare float @llvm.floor.f32(float) #1
+declare float @llvm.minnum.f32(float, float) #1
+declare float @llvm.maxnum.f32(float, float) #1
+declare float @llvm.amdgcn.fmed3.f32(float, float, float) #1
+declare double @llvm.fabs.f64(double) #1
+declare double @llvm.minnum.f64(double, double) #1
+declare double @llvm.maxnum.f64(double, double) #1
+declare half @llvm.fabs.f16(half) #1
+declare half @llvm.minnum.f16(half, half) #1
+declare half @llvm.maxnum.f16(half, half) #1
+declare void @llvm.dbg.value(metadata, i64, metadata, metadata) #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }
+attributes #2 = { nounwind "target-features"="+fp32-denormals" }
+attributes #3 = { nounwind "target-features"="-fp64-fp16-denormals" }
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!2, !3}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C99, file: !1, isOptimized: true, runtimeVersion: 0, emissionKind: NoDebug)
+!1 = !DIFile(filename: "/tmp/foo.cl", directory: "/dev/null")
+!2 = !{i32 2, !"Dwarf Version", i32 4}
+!3 = !{i32 2, !"Debug Info Version", i32 3}
+!4 = !DILocalVariable(name: "add", arg: 1, scope: !5, file: !1, line: 1)
+!5 = distinct !DISubprogram(name: "foo", scope: !1, file: !1, line: 1, type: !6, isLocal: false, isDefinition: true, scopeLine: 2, flags: DIFlagPrototyped, isOptimized: true, unit: !0)
+!6 = !DISubroutineType(types: !7)
+!7 = !{null, !8}
+!8 = !DIBasicType(name: "float", size: 32, align: 32)
+!9 = !DIExpression()
+!10 = !DILocation(line: 1, column: 42, scope: !5)