diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -314,6 +314,8 @@
   bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
   bool isCheapToSpeculateCttz() const override;
   bool isCheapToSpeculateCtlz() const override;
+  bool shouldSinkOperands(Instruction *I,
+                          SmallVectorImpl<Use *> &Ops) const override;
   bool isFPImmLegal(const APFloat &Imm, EVT VT,
                     bool ForCodeSize) const override;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1027,6 +1027,53 @@
   return Subtarget.hasStdExtZbb();
 }
 
+/// Check if sinking \p I's operands to I's basic block is profitable, because
+/// the operands can be folded into a target instruction, e.g.
+/// splats of scalars can fold into vector instructions.
+bool RISCVTargetLowering::shouldSinkOperands(
+    Instruction *I, SmallVectorImpl<Use *> &Ops) const {
+  if (!I->getType()->isVectorTy() || !Subtarget.hasStdExtV())
+    return false;
+
+  switch (I->getOpcode()) {
+  case Instruction::Add:
+  case Instruction::Sub:
+  case Instruction::Mul: {
+    // TODO: Support more opcodes.
+    bool IsProfitable = false;
+    for (auto &Op : I->operands()) {
+      // Make sure we are not already sinking this operand.
+      if (any_of(Ops, [&](Use *U) { return U->get() == Op; }))
+        continue;
+
+      ShuffleVectorInst *Shuffle = dyn_cast<ShuffleVectorInst>(Op);
+      if (!Shuffle || !Shuffle->isZeroEltSplat())
+        continue;
+
+      Value *ShuffleOperand = Shuffle->getOperand(0);
+      InsertElementInst *Insert = dyn_cast<InsertElementInst>(ShuffleOperand);
+      if (!Insert)
+        continue;
+
+      ConstantInt *ElementConstant =
+          dyn_cast<ConstantInt>(Insert->getOperand(2));
+      // Check that the insertelement is inserting into element 0.
+      if (!ElementConstant || ElementConstant->getZExtValue() != 0)
+        continue;
+
+      Ops.push_back(&Shuffle->getOperandUse(0));
+      Ops.push_back(&Op);
+      IsProfitable = true;
+    }
+
+    return IsProfitable;
+  }
+  default:
+    return false;
+  }
+  return false;
+}
+
 bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                        bool ForCodeSize) const {
   if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -0,0 +1,521 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+experimental-v \
+; RUN:   -riscv-v-vector-bits-min=128 | FileCheck %s
+
+define void @sink_splat_mul(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_mul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:  .LBB0_1: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vmul.vx v25, v25, a1
+; CHECK-NEXT:    vse32.v v25, (a0)
+; CHECK-NEXT:    addi a2, a2, -4
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    bnez a2, .LBB0_1
+; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %a, i64 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = mul <4 x i32> %wide.load, %broadcast.splat
+  %3 = bitcast i32* %0 to <4 x i32>*
+  store <4 x i32> %2, <4 x i32>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_add(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_add:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:  .LBB1_1: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vadd.vx v25, v25, a1
+; CHECK-NEXT:    vse32.v v25, (a0)
+; CHECK-NEXT:    addi a2, a2, -4
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    bnez a2, .LBB1_1
+; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %a, i64 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = add <4 x i32> %wide.load, %broadcast.splat
+  %3 = bitcast i32* %0 to <4 x i32>*
+  store <4 x i32> %2, <4 x i32>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_sub(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_sub:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:  .LBB2_1: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vsub.vx v25, v25, a1
+; CHECK-NEXT:    vse32.v v25, (a0)
+; CHECK-NEXT:    addi a2, a2, -4
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    bnez a2, .LBB2_1
+; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %a, i64 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = sub <4 x i32> %wide.load, %broadcast.splat
+  %3 = bitcast i32* %0 to <4 x i32>*
+  store <4 x i32> %2, <4 x i32>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_rsub(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_rsub:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    addi a2, zero, 1024
+; CHECK-NEXT:  .LBB3_1: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vle32.v v25, (a0)
+; CHECK-NEXT:    vrsub.vx v25, v25, a1
+; CHECK-NEXT:    vse32.v v25, (a0)
+; CHECK-NEXT:    addi a2, a2, -4
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    bnez a2, .LBB3_1
+; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %a, i64 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = sub <4 x i32> %broadcast.splat, %wide.load
+  %3 = bitcast i32* %0 to <4 x i32>*
+  store <4 x i32> %2, <4 x i32>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_mul_scalable(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_mul_scalable:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a7, a2, 1
+; CHECK-NEXT:    addi a3, zero, 1024
+; CHECK-NEXT:    bgeu a3, a7, .LBB4_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv t0, zero
+; CHECK-NEXT:    j .LBB4_5
+; CHECK-NEXT:  .LBB4_2: # %vector.ph
+; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    remu a6, a3, a7
+; CHECK-NEXT:    sub t0, a3, a6
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    mv a2, a0
+; CHECK-NEXT:  .LBB4_3: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vl2re32.v v26, (a2)
+; CHECK-NEXT:    vsetvli a3, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vmul.vx v26, v26, a1
+; CHECK-NEXT:    vs2r.v v26, (a2)
+; CHECK-NEXT:    add a5, a5, a7
+; CHECK-NEXT:    add a2, a2, a4
+; CHECK-NEXT:    bne a5, t0, .LBB4_3
+; CHECK-NEXT:  # %bb.4: # %middle.block
+; CHECK-NEXT:    beqz a6, .LBB4_7
+; CHECK-NEXT:  .LBB4_5: # %for.body.preheader
+; CHECK-NEXT:    addi a2, t0, -1024
+; CHECK-NEXT:    slli a3, t0, 2
+; CHECK-NEXT:    add a0, a0, a3
+; CHECK-NEXT:  .LBB4_6: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    lw a3, 0(a0)
+; CHECK-NEXT:    mv a4, a2
+; CHECK-NEXT:    mulw a2, a3, a1
+; CHECK-NEXT:    sw a2, 0(a0)
+; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    bgeu a2, a4, .LBB4_6
+; CHECK-NEXT:  .LBB4_7: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 2
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 2
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 2
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds i32, i32* %a, i64 %index
+  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
+  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
+  %8 = mul <vscale x 4 x i32> %wide.load, %broadcast.splat
+  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
+  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:                               ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+  %11 = load i32, i32* %arrayidx, align 4
+  %mul = mul i32 %11, %x
+  store i32 %mul, i32* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+define void @sink_splat_add_scalable(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_add_scalable:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a7, a2, 1
+; CHECK-NEXT:    addi a3, zero, 1024
+; CHECK-NEXT:    bgeu a3, a7, .LBB5_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv t0, zero
+; CHECK-NEXT:    j .LBB5_5
+; CHECK-NEXT:  .LBB5_2: # %vector.ph
+; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    remu a6, a3, a7
+; CHECK-NEXT:    sub t0, a3, a6
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    mv a2, a0
+; CHECK-NEXT:  .LBB5_3: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vl2re32.v v26, (a2)
+; CHECK-NEXT:    vsetvli a3, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vadd.vx v26, v26, a1
+; CHECK-NEXT:    vs2r.v v26, (a2)
+; CHECK-NEXT:    add a5, a5, a7
+; CHECK-NEXT:    add a2, a2, a4
+; CHECK-NEXT:    bne a5, t0, .LBB5_3
+; CHECK-NEXT:  # %bb.4: # %middle.block
+; CHECK-NEXT:    beqz a6, .LBB5_7
+; CHECK-NEXT:  .LBB5_5: # %for.body.preheader
+; CHECK-NEXT:    addi a2, t0, -1024
+; CHECK-NEXT:    slli a3, t0, 2
+; CHECK-NEXT:    add a0, a0, a3
+; CHECK-NEXT:  .LBB5_6: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    lw a3, 0(a0)
+; CHECK-NEXT:    mv a4, a2
+; CHECK-NEXT:    addw a2, a3, a1
+; CHECK-NEXT:    sw a2, 0(a0)
+; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    bgeu a2, a4, .LBB5_6
+; CHECK-NEXT:  .LBB5_7: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 2
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 2
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 2
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds i32, i32* %a, i64 %index
+  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
+  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
+  %8 = add <vscale x 4 x i32> %wide.load, %broadcast.splat
+  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
+  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:                               ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+  %11 = load i32, i32* %arrayidx, align 4
+  %add = add i32 %11, %x
+  store i32 %add, i32* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+define void @sink_splat_sub_scalable(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_sub_scalable:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a7, a2, 1
+; CHECK-NEXT:    addi a3, zero, 1024
+; CHECK-NEXT:    bgeu a3, a7, .LBB6_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv t0, zero
+; CHECK-NEXT:    j .LBB6_5
+; CHECK-NEXT:  .LBB6_2: # %vector.ph
+; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    remu a6, a3, a7
+; CHECK-NEXT:    sub t0, a3, a6
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    mv a2, a0
+; CHECK-NEXT:  .LBB6_3: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vl2re32.v v26, (a2)
+; CHECK-NEXT:    vsetvli a3, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vsub.vx v26, v26, a1
+; CHECK-NEXT:    vs2r.v v26, (a2)
+; CHECK-NEXT:    add a5, a5, a7
+; CHECK-NEXT:    add a2, a2, a4
+; CHECK-NEXT:    bne a5, t0, .LBB6_3
+; CHECK-NEXT:  # %bb.4: # %middle.block
+; CHECK-NEXT:    beqz a6, .LBB6_7
+; CHECK-NEXT:  .LBB6_5: # %for.body.preheader
+; CHECK-NEXT:    addi a2, t0, -1024
+; CHECK-NEXT:    slli a3, t0, 2
+; CHECK-NEXT:    add a0, a0, a3
+; CHECK-NEXT:  .LBB6_6: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    lw a3, 0(a0)
+; CHECK-NEXT:    mv a4, a2
+; CHECK-NEXT:    subw a2, a3, a1
+; CHECK-NEXT:    sw a2, 0(a0)
+; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    bgeu a2, a4, .LBB6_6
+; CHECK-NEXT:  .LBB6_7: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 2
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 2
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 2
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds i32, i32* %a, i64 %index
+  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
+  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
+  %8 = sub <vscale x 4 x i32> %wide.load, %broadcast.splat
+  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
+  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:                               ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+  %11 = load i32, i32* %arrayidx, align 4
+  %sub = sub i32 %11, %x
+  store i32 %sub, i32* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+define void @sink_splat_rsub_scalable(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_rsub_scalable:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a7, a2, 1
+; CHECK-NEXT:    addi a3, zero, 1024
+; CHECK-NEXT:    bgeu a3, a7, .LBB7_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv t0, zero
+; CHECK-NEXT:    j .LBB7_5
+; CHECK-NEXT:  .LBB7_2: # %vector.ph
+; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    remu a6, a3, a7
+; CHECK-NEXT:    sub t0, a3, a6
+; CHECK-NEXT:    slli a4, a2, 1
+; CHECK-NEXT:    mv a2, a0
+; CHECK-NEXT:  .LBB7_3: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vl2re32.v v26, (a2)
+; CHECK-NEXT:    vsetvli a3, zero, e32, m2, ta, mu
+; CHECK-NEXT:    vrsub.vx v26, v26, a1
+; CHECK-NEXT:    vs2r.v v26, (a2)
+; CHECK-NEXT:    add a5, a5, a7
+; CHECK-NEXT:    add a2, a2, a4
+; CHECK-NEXT:    bne a5, t0, .LBB7_3
+; CHECK-NEXT:  # %bb.4: # %middle.block
+; CHECK-NEXT:    beqz a6, .LBB7_7
+; CHECK-NEXT:  .LBB7_5: # %for.body.preheader
+; CHECK-NEXT:    addi a2, t0, -1024
+; CHECK-NEXT:    slli a3, t0, 2
+; CHECK-NEXT:    add a0, a0, a3
+; CHECK-NEXT:  .LBB7_6: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    lw a3, 0(a0)
+; CHECK-NEXT:    mv a4, a2
+; CHECK-NEXT:    subw a2, a1, a3
+; CHECK-NEXT:    sw a2, 0(a0)
+; CHECK-NEXT:    addi a2, a4, 1
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    bgeu a2, a4, .LBB7_6
+; CHECK-NEXT:  .LBB7_7: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 2
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 2
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 2
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds i32, i32* %a, i64 %index
+  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
+  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
+  %8 = sub <vscale x 4 x i32> %broadcast.splat, %wide.load
+  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
+  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:                               ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+  %11 = load i32, i32* %arrayidx, align 4
+  %sub = sub i32 %x, %11
+  store i32 %sub, i32* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+declare i64 @llvm.vscale.i64()
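
Note: for reviewers, here is a minimal sketch of the IR-level effect this hook
enables (the value names are illustrative, not taken from the patch).
CodeGenPrepare queries shouldSinkOperands() and, when it returns true, moves or
clones the matched insertelement+shufflevector splat sequence into the block of
its user. Since SelectionDAG selects one basic block at a time, the splat then
becomes visible while selecting the loop body and folds into a
scalar-operand (.vx) vector instruction, keeping %x in a scalar register
instead of materializing it into a vector register across the loop:

  ; Before sinking: the splat is hoisted into the preheader, out of reach of
  ; per-block instruction selection for the loop body.
  entry:
    %ins = insertelement <4 x i32> poison, i32 %x, i32 0
    %splat = shufflevector <4 x i32> %ins, <4 x i32> poison, <4 x i32> zeroinitializer
    br label %vector.body

  vector.body:
    %r = mul <4 x i32> %wide.load, %splat

  ; After sinking: the splat sequence is rematerialized next to its use, so
  ; the multiply selects to "vmul.vx v25, v25, a1" as checked above.
  vector.body:
    %ins.sunk = insertelement <4 x i32> poison, i32 %x, i32 0
    %splat.sunk = shufflevector <4 x i32> %ins.sunk, <4 x i32> poison, <4 x i32> zeroinitializer
    %r = mul <4 x i32> %wide.load, %splat.sunk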