diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h --- a/llvm/lib/Target/RISCV/RISCVISelLowering.h +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h @@ -314,6 +314,8 @@ bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override; bool isCheapToSpeculateCttz() const override; bool isCheapToSpeculateCtlz() const override; + bool shouldSinkOperands(Instruction *I, + SmallVectorImpl<Use *> &Ops) const override; bool isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const override; diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -28,8 +28,9 @@ #include "llvm/CodeGen/ValueTypes.h" #include "llvm/IR/DiagnosticInfo.h" #include "llvm/IR/DiagnosticPrinter.h" -#include "llvm/IR/IntrinsicsRISCV.h" #include "llvm/IR/IRBuilder.h" +#include "llvm/IR/IntrinsicsRISCV.h" +#include "llvm/IR/PatternMatch.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/KnownBits.h" @@ -1030,6 +1031,55 @@ return Subtarget.hasStdExtZbb(); } +/// Check if sinking \p I's operands to I's basic block is profitable, because +/// the operands can be folded into a target instruction, e.g. +/// splats of scalars can fold into vector instructions. 
+bool RISCVTargetLowering::shouldSinkOperands( + Instruction *I, SmallVectorImpl<Use *> &Ops) const { + using namespace llvm::PatternMatch; + + if (!I->getType()->isVectorTy() || !Subtarget.hasStdExtV()) + return false; + + auto IsSinker = [&](Instruction *I, int Operand) { + switch (I->getOpcode()) { + case Instruction::Add: + case Instruction::Sub: + case Instruction::Mul: + return true; + default: + return false; + } + }; + + for (auto OpIdx : enumerate(I->operands())) { + if (!IsSinker(I, OpIdx.index())) + continue; + + Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get()); + // Make sure we are not already sinking this operand + if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; })) + continue; + + // We are looking for a splat that can be sunk. + if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()), + m_Undef(), m_ZeroMask()))) + continue; + + // All uses of the shuffle should be sunk to avoid duplicating it across gpr + // and vector registers + for (Use &U : Op->uses()) { + Instruction *Insn = cast<Instruction>(U.getUser()); + if (!IsSinker(Insn, U.getOperandNo())) + return false; + } + + Ops.push_back(&Op->getOperandUse(0)); + Ops.push_back(&OpIdx.value()); + } + return true; +} + bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT, bool ForCodeSize) const { if (VT == MVT::f16 && !Subtarget.hasStdExtZfh()) diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll --- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll +++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll @@ -5,17 +5,16 @@ define void @sink_splat_mul(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_mul: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v25, a1 -; CHECK-NEXT: addi a1, zero, 1024 +; CHECK-NEXT: addi a2, zero, 1024 ; CHECK-NEXT: .LBB0_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vle32.v v26, (a0) -; 
CHECK-NEXT: vmul.vv v26, v26, v25 -; CHECK-NEXT: vse32.v v26, (a0) -; CHECK-NEXT: addi a1, a1, -4 +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vle32.v v25, (a0) +; CHECK-NEXT: vmul.vx v25, v25, a1 +; CHECK-NEXT: vse32.v v25, (a0) +; CHECK-NEXT: addi a2, a2, -4 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: bnez a1, .LBB0_1 +; CHECK-NEXT: bnez a2, .LBB0_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup ; CHECK-NEXT: ret entry: @@ -42,17 +41,16 @@ define void @sink_splat_add(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_add: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v25, a1 -; CHECK-NEXT: addi a1, zero, 1024 +; CHECK-NEXT: addi a2, zero, 1024 ; CHECK-NEXT: .LBB1_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vle32.v v26, (a0) -; CHECK-NEXT: vadd.vv v26, v26, v25 -; CHECK-NEXT: vse32.v v26, (a0) -; CHECK-NEXT: addi a1, a1, -4 +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vle32.v v25, (a0) +; CHECK-NEXT: vadd.vx v25, v25, a1 +; CHECK-NEXT: vse32.v v25, (a0) +; CHECK-NEXT: addi a2, a2, -4 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: bnez a1, .LBB1_1 +; CHECK-NEXT: bnez a2, .LBB1_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup ; CHECK-NEXT: ret entry: @@ -79,17 +77,16 @@ define void @sink_splat_sub(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_sub: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v25, a1 -; CHECK-NEXT: addi a1, zero, 1024 +; CHECK-NEXT: addi a2, zero, 1024 ; CHECK-NEXT: .LBB2_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vle32.v v26, (a0) -; CHECK-NEXT: vadd.vv v26, v26, v25 -; CHECK-NEXT: vse32.v v26, (a0) -; CHECK-NEXT: addi a1, a1, -4 +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vle32.v v25, (a0) +; CHECK-NEXT: vadd.vx v25, v25, a1 +; CHECK-NEXT: vse32.v v25, (a0) +; CHECK-NEXT: addi a2, a2, -4 ; 
CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: bnez a1, .LBB2_1 +; CHECK-NEXT: bnez a2, .LBB2_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup ; CHECK-NEXT: ret entry: @@ -116,17 +113,16 @@ define void @sink_splat_rsub(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_rsub: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu -; CHECK-NEXT: vmv.v.x v25, a1 -; CHECK-NEXT: addi a1, zero, 1024 +; CHECK-NEXT: addi a2, zero, 1024 ; CHECK-NEXT: .LBB3_1: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vle32.v v26, (a0) -; CHECK-NEXT: vsub.vv v26, v25, v26 -; CHECK-NEXT: vse32.v v26, (a0) -; CHECK-NEXT: addi a1, a1, -4 +; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu +; CHECK-NEXT: vle32.v v25, (a0) +; CHECK-NEXT: vrsub.vx v25, v25, a1 +; CHECK-NEXT: vse32.v v25, (a0) +; CHECK-NEXT: addi a2, a2, -4 ; CHECK-NEXT: addi a0, a0, 16 -; CHECK-NEXT: bnez a1, .LBB3_1 +; CHECK-NEXT: bnez a2, .LBB3_1 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup ; CHECK-NEXT: ret entry: @@ -153,34 +149,33 @@ define void @sink_splat_mul_scalable(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_mul_scalable: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a5, vlenb -; CHECK-NEXT: srli a7, a5, 1 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: srli a7, a2, 1 ; CHECK-NEXT: addi a3, zero, 1024 ; CHECK-NEXT: bgeu a3, a7, .LBB4_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, zero +; CHECK-NEXT: mv t0, zero ; CHECK-NEXT: j .LBB4_5 ; CHECK-NEXT: .LBB4_2: # %vector.ph -; CHECK-NEXT: mv a4, zero +; CHECK-NEXT: mv a5, zero ; CHECK-NEXT: remu a6, a3, a7 -; CHECK-NEXT: sub a3, a3, a6 -; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, mu -; CHECK-NEXT: vmv.v.x v26, a1 -; CHECK-NEXT: slli a5, a5, 1 +; CHECK-NEXT: sub t0, a3, a6 +; CHECK-NEXT: slli a4, a2, 1 ; CHECK-NEXT: mv a2, a0 ; CHECK-NEXT: .LBB4_3: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vl2re32.v v28, (a2) -; CHECK-NEXT: vmul.vv v28, v28, v26 -; CHECK-NEXT: 
vs2r.v v28, (a2) -; CHECK-NEXT: add a4, a4, a7 -; CHECK-NEXT: add a2, a2, a5 -; CHECK-NEXT: bne a4, a3, .LBB4_3 +; CHECK-NEXT: vl2re32.v v26, (a2) +; CHECK-NEXT: vsetvli a3, zero, e32, m2, ta, mu +; CHECK-NEXT: vmul.vx v26, v26, a1 +; CHECK-NEXT: vs2r.v v26, (a2) +; CHECK-NEXT: add a5, a5, a7 +; CHECK-NEXT: add a2, a2, a4 +; CHECK-NEXT: bne a5, t0, .LBB4_3 ; CHECK-NEXT: # %bb.4: # %middle.block ; CHECK-NEXT: beqz a6, .LBB4_7 ; CHECK-NEXT: .LBB4_5: # %for.body.preheader -; CHECK-NEXT: addi a2, a3, -1024 -; CHECK-NEXT: slli a3, a3, 2 +; CHECK-NEXT: addi a2, t0, -1024 +; CHECK-NEXT: slli a3, t0, 2 ; CHECK-NEXT: add a0, a0, a3 ; CHECK-NEXT: .LBB4_6: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 @@ -247,34 +242,33 @@ define void @sink_splat_add_scalable(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_add_scalable: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a5, vlenb -; CHECK-NEXT: srli a7, a5, 1 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: srli a7, a2, 1 ; CHECK-NEXT: addi a3, zero, 1024 ; CHECK-NEXT: bgeu a3, a7, .LBB5_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, zero +; CHECK-NEXT: mv t0, zero ; CHECK-NEXT: j .LBB5_5 ; CHECK-NEXT: .LBB5_2: # %vector.ph -; CHECK-NEXT: mv a4, zero +; CHECK-NEXT: mv a5, zero ; CHECK-NEXT: remu a6, a3, a7 -; CHECK-NEXT: sub a3, a3, a6 -; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, mu -; CHECK-NEXT: vmv.v.x v26, a1 -; CHECK-NEXT: slli a5, a5, 1 +; CHECK-NEXT: sub t0, a3, a6 +; CHECK-NEXT: slli a4, a2, 1 ; CHECK-NEXT: mv a2, a0 ; CHECK-NEXT: .LBB5_3: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vl2re32.v v28, (a2) -; CHECK-NEXT: vadd.vv v28, v28, v26 -; CHECK-NEXT: vs2r.v v28, (a2) -; CHECK-NEXT: add a4, a4, a7 -; CHECK-NEXT: add a2, a2, a5 -; CHECK-NEXT: bne a4, a3, .LBB5_3 +; CHECK-NEXT: vl2re32.v v26, (a2) +; CHECK-NEXT: vsetvli a3, zero, e32, m2, ta, mu +; CHECK-NEXT: vadd.vx v26, v26, a1 +; CHECK-NEXT: vs2r.v v26, (a2) +; CHECK-NEXT: add a5, a5, a7 +; CHECK-NEXT: 
add a2, a2, a4 +; CHECK-NEXT: bne a5, t0, .LBB5_3 ; CHECK-NEXT: # %bb.4: # %middle.block ; CHECK-NEXT: beqz a6, .LBB5_7 ; CHECK-NEXT: .LBB5_5: # %for.body.preheader -; CHECK-NEXT: addi a2, a3, -1024 -; CHECK-NEXT: slli a3, a3, 2 +; CHECK-NEXT: addi a2, t0, -1024 +; CHECK-NEXT: slli a3, t0, 2 ; CHECK-NEXT: add a0, a0, a3 ; CHECK-NEXT: .LBB5_6: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 @@ -341,34 +335,33 @@ define void @sink_splat_sub_scalable(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_sub_scalable: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a5, vlenb -; CHECK-NEXT: srli a7, a5, 1 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: srli a7, a2, 1 ; CHECK-NEXT: addi a3, zero, 1024 ; CHECK-NEXT: bgeu a3, a7, .LBB6_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, zero +; CHECK-NEXT: mv t0, zero ; CHECK-NEXT: j .LBB6_5 ; CHECK-NEXT: .LBB6_2: # %vector.ph -; CHECK-NEXT: mv a4, zero +; CHECK-NEXT: mv a5, zero ; CHECK-NEXT: remu a6, a3, a7 -; CHECK-NEXT: sub a3, a3, a6 -; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, mu -; CHECK-NEXT: vmv.v.x v26, a1 -; CHECK-NEXT: slli a5, a5, 1 +; CHECK-NEXT: sub t0, a3, a6 +; CHECK-NEXT: slli a4, a2, 1 ; CHECK-NEXT: mv a2, a0 ; CHECK-NEXT: .LBB6_3: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vl2re32.v v28, (a2) -; CHECK-NEXT: vsub.vv v28, v28, v26 -; CHECK-NEXT: vs2r.v v28, (a2) -; CHECK-NEXT: add a4, a4, a7 -; CHECK-NEXT: add a2, a2, a5 -; CHECK-NEXT: bne a4, a3, .LBB6_3 +; CHECK-NEXT: vl2re32.v v26, (a2) +; CHECK-NEXT: vsetvli a3, zero, e32, m2, ta, mu +; CHECK-NEXT: vsub.vx v26, v26, a1 +; CHECK-NEXT: vs2r.v v26, (a2) +; CHECK-NEXT: add a5, a5, a7 +; CHECK-NEXT: add a2, a2, a4 +; CHECK-NEXT: bne a5, t0, .LBB6_3 ; CHECK-NEXT: # %bb.4: # %middle.block ; CHECK-NEXT: beqz a6, .LBB6_7 ; CHECK-NEXT: .LBB6_5: # %for.body.preheader -; CHECK-NEXT: addi a2, a3, -1024 -; CHECK-NEXT: slli a3, a3, 2 +; CHECK-NEXT: addi a2, t0, -1024 +; CHECK-NEXT: slli a3, t0, 2 ; CHECK-NEXT: add 
a0, a0, a3 ; CHECK-NEXT: .LBB6_6: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 @@ -435,34 +428,33 @@ define void @sink_splat_rsub_scalable(i32* nocapture %a, i32 signext %x) { ; CHECK-LABEL: sink_splat_rsub_scalable: ; CHECK: # %bb.0: # %entry -; CHECK-NEXT: csrr a5, vlenb -; CHECK-NEXT: srli a7, a5, 1 +; CHECK-NEXT: csrr a2, vlenb +; CHECK-NEXT: srli a7, a2, 1 ; CHECK-NEXT: addi a3, zero, 1024 ; CHECK-NEXT: bgeu a3, a7, .LBB7_2 ; CHECK-NEXT: # %bb.1: -; CHECK-NEXT: mv a3, zero +; CHECK-NEXT: mv t0, zero ; CHECK-NEXT: j .LBB7_5 ; CHECK-NEXT: .LBB7_2: # %vector.ph -; CHECK-NEXT: mv a4, zero +; CHECK-NEXT: mv a5, zero ; CHECK-NEXT: remu a6, a3, a7 -; CHECK-NEXT: sub a3, a3, a6 -; CHECK-NEXT: vsetvli a2, zero, e32, m2, ta, mu -; CHECK-NEXT: vmv.v.x v26, a1 -; CHECK-NEXT: slli a5, a5, 1 +; CHECK-NEXT: sub t0, a3, a6 +; CHECK-NEXT: slli a4, a2, 1 ; CHECK-NEXT: mv a2, a0 ; CHECK-NEXT: .LBB7_3: # %vector.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vl2re32.v v28, (a2) -; CHECK-NEXT: vsub.vv v28, v26, v28 -; CHECK-NEXT: vs2r.v v28, (a2) -; CHECK-NEXT: add a4, a4, a7 -; CHECK-NEXT: add a2, a2, a5 -; CHECK-NEXT: bne a4, a3, .LBB7_3 +; CHECK-NEXT: vl2re32.v v26, (a2) +; CHECK-NEXT: vsetvli a3, zero, e32, m2, ta, mu +; CHECK-NEXT: vrsub.vx v26, v26, a1 +; CHECK-NEXT: vs2r.v v26, (a2) +; CHECK-NEXT: add a5, a5, a7 +; CHECK-NEXT: add a2, a2, a4 +; CHECK-NEXT: bne a5, t0, .LBB7_3 ; CHECK-NEXT: # %bb.4: # %middle.block ; CHECK-NEXT: beqz a6, .LBB7_7 ; CHECK-NEXT: .LBB7_5: # %for.body.preheader -; CHECK-NEXT: addi a2, a3, -1024 -; CHECK-NEXT: slli a3, a3, 2 +; CHECK-NEXT: addi a2, t0, -1024 +; CHECK-NEXT: slli a3, t0, 2 ; CHECK-NEXT: add a0, a0, a3 ; CHECK-NEXT: .LBB7_6: # %for.body ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1