Index: llvm/lib/Target/RISCV/RISCVISelLowering.cpp
===================================================================
--- llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1260,10 +1260,6 @@
 switch (II->getIntrinsicID()) {
 case Intrinsic::fma:
 case Intrinsic::vp_fma:
-    return Operand == 0 || Operand == 1;
-    // FIXME: Our patterns can only match vx/vf instructions when the splat
-    // it on the RHS, because TableGen doesn't recognize our VP operations
-    // as commutative.
 case Intrinsic::vp_add:
 case Intrinsic::vp_mul:
 case Intrinsic::vp_and:
@@ -1271,6 +1267,7 @@
 case Intrinsic::vp_xor:
 case Intrinsic::vp_fadd:
 case Intrinsic::vp_fmul:
+    return Operand == 0 || Operand == 1;
 case Intrinsic::vp_shl:
 case Intrinsic::vp_lshr:
 case Intrinsic::vp_ashr:
Index: llvm/test/CodeGen/RISCV/rvv/sink-splat-operands-commute.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/RISCV/rvv/sink-splat-operands-commute.ll
@@ -0,0 +1,906 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+f -target-abi=lp64f \
+; RUN:   -riscv-v-vector-bits-min=128 | FileCheck %s
+
+define void @sink_splat_mul(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_mul:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: .LBB0_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmul.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, -4
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bnez a2, .LBB0_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %a, i64 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = mul <4 x i32> %broadcast.splat, %wide.load
+  %3 = bitcast i32* %0 to <4 x i32>*
+  store <4 x i32> %2, <4 x i32>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_add(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_add:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: .LBB1_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vadd.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, -4
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bnez a2, .LBB1_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %a, i64 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = add <4 x i32> %broadcast.splat, %wide.load
+  %3 = bitcast i32* %0 to <4 x i32>*
+  store <4 x i32> %2, <4 x i32>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_and(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_and:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: .LBB2_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, -4
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bnez a2, .LBB2_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %a, i64 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = and <4 x i32> %broadcast.splat, %wide.load
+  %3 = bitcast i32* %0 to <4 x i32>*
+  store <4 x i32> %2, <4 x i32>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_or(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_or:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: .LBB3_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vor.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, -4
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bnez a2, .LBB3_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %a, i64 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = or <4 x i32> %broadcast.splat, %wide.load
+  %3 = bitcast i32* %0 to <4 x i32>*
+  store <4 x i32> %2, <4 x i32>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_xor(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_xor:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a2, 1024
+; CHECK-NEXT: .LBB4_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a2, a2, -4
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bnez a2, .LBB4_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds i32, i32* %a, i64 %index
+  %1 = bitcast i32* %0 to <4 x i32>*
+  %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+  %2 = xor <4 x i32> %broadcast.splat, %wide.load
+  %3 = bitcast i32* %0 to <4 x i32>*
+  store <4 x i32> %2, <4 x i32>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_mul_scalable(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_mul_scalable:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: csrr a6, vlenb
+; CHECK-NEXT: srli a2, a6, 1
+; CHECK-NEXT: li a3, 1024
+; CHECK-NEXT: bgeu a3, a2, .LBB5_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a3, 0
+; CHECK-NEXT: j .LBB5_5
+; CHECK-NEXT: .LBB5_2: # %vector.ph
+; CHECK-NEXT: li a5, 0
+; CHECK-NEXT: remu a4, a3, a2
+; CHECK-NEXT: sub a3, a3, a4
+; CHECK-NEXT: slli a6, a6, 1
+; CHECK-NEXT: mv a7, a0
+; CHECK-NEXT: .LBB5_3: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vl2re32.v v8, (a7)
+; CHECK-NEXT: vsetvli t0, zero, e32, m2, ta, mu
+; CHECK-NEXT: vmul.vx v8, v8, a1
+; CHECK-NEXT: vs2r.v v8, (a7)
+; CHECK-NEXT: add a5, a5, a2
+; CHECK-NEXT: add a7, a7, a6
+; CHECK-NEXT: bne a5, a3, .LBB5_3
+; CHECK-NEXT: # %bb.4: # %middle.block
+; CHECK-NEXT: beqz a4, .LBB5_7
+; CHECK-NEXT: .LBB5_5: # %for.body.preheader
+; CHECK-NEXT: addi a2, a3, -1024
+; CHECK-NEXT: slli a3, a3, 2
+; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: .LBB5_6: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: lw a3, 0(a0)
+; CHECK-NEXT: mv a4, a2
+; CHECK-NEXT: mulw a2, a3, a1
+; CHECK-NEXT: sw a2, 0(a0)
+; CHECK-NEXT: addi a2, a4, 1
+; CHECK-NEXT: addi a0, a0, 4
+; CHECK-NEXT: bgeu a2, a4, .LBB5_6
+; CHECK-NEXT: .LBB5_7: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 2
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph: ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 2
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 2
+  br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds i32, i32* %a, i64 %index
+  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
+  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
+  %8 = mul <vscale x 4 x i32> %broadcast.splat, %wide.load
+  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
+  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader: ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %middle.block
+  ret void
+
+for.body: ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+  %11 = load i32, i32* %arrayidx, align 4
+  %mul = mul i32 %11, %x
+  store i32 %mul, i32* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+define void @sink_splat_add_scalable(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_add_scalable:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: csrr a6, vlenb
+; CHECK-NEXT: srli a2, a6, 1
+; CHECK-NEXT: li a3, 1024
+; CHECK-NEXT: bgeu a3, a2, .LBB6_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a3, 0
+; CHECK-NEXT: j .LBB6_5
+; CHECK-NEXT: .LBB6_2: # %vector.ph
+; CHECK-NEXT: li a5, 0
+; CHECK-NEXT: remu a4, a3, a2
+; CHECK-NEXT: sub a3, a3, a4
+; CHECK-NEXT: slli a6, a6, 1
+; CHECK-NEXT: mv a7, a0
+; CHECK-NEXT: .LBB6_3: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vl2re32.v v8, (a7)
+; CHECK-NEXT: vsetvli t0, zero, e32, m2, ta, mu
+; CHECK-NEXT: vadd.vx v8, v8, a1
+; CHECK-NEXT: vs2r.v v8, (a7)
+; CHECK-NEXT: add a5, a5, a2
+; CHECK-NEXT: add a7, a7, a6
+; CHECK-NEXT: bne a5, a3, .LBB6_3
+; CHECK-NEXT: # %bb.4: # %middle.block
+; CHECK-NEXT: beqz a4, .LBB6_7
+; CHECK-NEXT: .LBB6_5: # %for.body.preheader
+; CHECK-NEXT: addi a2, a3, -1024
+; CHECK-NEXT: slli a3, a3, 2
+; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: .LBB6_6: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: lw a3, 0(a0)
+; CHECK-NEXT: mv a4, a2
+; CHECK-NEXT: addw a2, a3, a1
+; CHECK-NEXT: sw a2, 0(a0)
+; CHECK-NEXT: addi a2, a4, 1
+; CHECK-NEXT: addi a0, a0, 4
+; CHECK-NEXT: bgeu a2, a4, .LBB6_6
+; CHECK-NEXT: .LBB6_7: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 2
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph: ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 2
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 2
+  br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds i32, i32* %a, i64 %index
+  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
+  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
+  %8 = add <vscale x 4 x i32> %broadcast.splat, %wide.load
+  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
+  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader: ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %middle.block
+  ret void
+
+for.body: ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+  %11 = load i32, i32* %arrayidx, align 4
+  %add = add i32 %11, %x
+  store i32 %add, i32* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+define void @sink_splat_and_scalable(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_and_scalable:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: csrr a6, vlenb
+; CHECK-NEXT: srli a2, a6, 1
+; CHECK-NEXT: li a3, 1024
+; CHECK-NEXT: bgeu a3, a2, .LBB7_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a3, 0
+; CHECK-NEXT: j .LBB7_5
+; CHECK-NEXT: .LBB7_2: # %vector.ph
+; CHECK-NEXT: li a5, 0
+; CHECK-NEXT: remu a4, a3, a2
+; CHECK-NEXT: sub a3, a3, a4
+; CHECK-NEXT: slli a6, a6, 1
+; CHECK-NEXT: mv a7, a0
+; CHECK-NEXT: .LBB7_3: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vl2re32.v v8, (a7)
+; CHECK-NEXT: vsetvli t0, zero, e32, m2, ta, mu
+; CHECK-NEXT: vand.vx v8, v8, a1
+; CHECK-NEXT: vs2r.v v8, (a7)
+; CHECK-NEXT: add a5, a5, a2
+; CHECK-NEXT: add a7, a7, a6
+; CHECK-NEXT: bne a5, a3, .LBB7_3
+; CHECK-NEXT: # %bb.4: # %middle.block
+; CHECK-NEXT: beqz a4, .LBB7_7
+; CHECK-NEXT: .LBB7_5: # %for.body.preheader
+; CHECK-NEXT: addi a2, a3, -1024
+; CHECK-NEXT: slli a3, a3, 2
+; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: .LBB7_6: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: lw a3, 0(a0)
+; CHECK-NEXT: mv a4, a2
+; CHECK-NEXT: and a2, a3, a1
+; CHECK-NEXT: sw a2, 0(a0)
+; CHECK-NEXT: addi a2, a4, 1
+; CHECK-NEXT: addi a0, a0, 4
+; CHECK-NEXT: bgeu a2, a4, .LBB7_6
+; CHECK-NEXT: .LBB7_7: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 2
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph: ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 2
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 2
+  br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds i32, i32* %a, i64 %index
+  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
+  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
+  %8 = and <vscale x 4 x i32> %broadcast.splat, %wide.load
+  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
+  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader: ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %middle.block
+  ret void
+
+for.body: ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+  %11 = load i32, i32* %arrayidx, align 4
+  %and = and i32 %11, %x
+  store i32 %and, i32* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+define void @sink_splat_or_scalable(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_or_scalable:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: csrr a6, vlenb
+; CHECK-NEXT: srli a2, a6, 1
+; CHECK-NEXT: li a3, 1024
+; CHECK-NEXT: bgeu a3, a2, .LBB8_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a3, 0
+; CHECK-NEXT: j .LBB8_5
+; CHECK-NEXT: .LBB8_2: # %vector.ph
+; CHECK-NEXT: li a5, 0
+; CHECK-NEXT: remu a4, a3, a2
+; CHECK-NEXT: sub a3, a3, a4
+; CHECK-NEXT: slli a6, a6, 1
+; CHECK-NEXT: mv a7, a0
+; CHECK-NEXT: .LBB8_3: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vl2re32.v v8, (a7)
+; CHECK-NEXT: vsetvli t0, zero, e32, m2, ta, mu
+; CHECK-NEXT: vor.vx v8, v8, a1
+; CHECK-NEXT: vs2r.v v8, (a7)
+; CHECK-NEXT: add a5, a5, a2
+; CHECK-NEXT: add a7, a7, a6
+; CHECK-NEXT: bne a5, a3, .LBB8_3
+; CHECK-NEXT: # %bb.4: # %middle.block
+; CHECK-NEXT: beqz a4, .LBB8_7
+; CHECK-NEXT: .LBB8_5: # %for.body.preheader
+; CHECK-NEXT: addi a2, a3, -1024
+; CHECK-NEXT: slli a3, a3, 2
+; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: .LBB8_6: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: lw a3, 0(a0)
+; CHECK-NEXT: mv a4, a2
+; CHECK-NEXT: or a2, a3, a1
+; CHECK-NEXT: sw a2, 0(a0)
+; CHECK-NEXT: addi a2, a4, 1
+; CHECK-NEXT: addi a0, a0, 4
+; CHECK-NEXT: bgeu a2, a4, .LBB8_6
+; CHECK-NEXT: .LBB8_7: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 2
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph: ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 2
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 2
+  br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds i32, i32* %a, i64 %index
+  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
+  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
+  %8 = or <vscale x 4 x i32> %broadcast.splat, %wide.load
+  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
+  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader: ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %middle.block
+  ret void
+
+for.body: ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+  %11 = load i32, i32* %arrayidx, align 4
+  %or = or i32 %11, %x
+  store i32 %or, i32* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+define void @sink_splat_xor_scalable(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_xor_scalable:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: csrr a6, vlenb
+; CHECK-NEXT: srli a2, a6, 1
+; CHECK-NEXT: li a3, 1024
+; CHECK-NEXT: bgeu a3, a2, .LBB9_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a3, 0
+; CHECK-NEXT: j .LBB9_5
+; CHECK-NEXT: .LBB9_2: # %vector.ph
+; CHECK-NEXT: li a5, 0
+; CHECK-NEXT: remu a4, a3, a2
+; CHECK-NEXT: sub a3, a3, a4
+; CHECK-NEXT: slli a6, a6, 1
+; CHECK-NEXT: mv a7, a0
+; CHECK-NEXT: .LBB9_3: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vl2re32.v v8, (a7)
+; CHECK-NEXT: vsetvli t0, zero, e32, m2, ta, mu
+; CHECK-NEXT: vxor.vx v8, v8, a1
+; CHECK-NEXT: vs2r.v v8, (a7)
+; CHECK-NEXT: add a5, a5, a2
+; CHECK-NEXT: add a7, a7, a6
+; CHECK-NEXT: bne a5, a3, .LBB9_3
+; CHECK-NEXT: # %bb.4: # %middle.block
+; CHECK-NEXT: beqz a4, .LBB9_7
+; CHECK-NEXT: .LBB9_5: # %for.body.preheader
+; CHECK-NEXT: addi a2, a3, -1024
+; CHECK-NEXT: slli a3, a3, 2
+; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: .LBB9_6: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: lw a3, 0(a0)
+; CHECK-NEXT: mv a4, a2
+; CHECK-NEXT: xor a2, a3, a1
+; CHECK-NEXT: sw a2, 0(a0)
+; CHECK-NEXT: addi a2, a4, 1
+; CHECK-NEXT: addi a0, a0, 4
+; CHECK-NEXT: bgeu a2, a4, .LBB9_6
+; CHECK-NEXT: .LBB9_7: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 2
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph: ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 2
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 2
+  br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds i32, i32* %a, i64 %index
+  %7 = bitcast i32* %6 to <vscale x 4 x i32>*
+  %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
+  %8 = xor <vscale x 4 x i32> %broadcast.splat, %wide.load
+  %9 = bitcast i32* %6 to <vscale x 4 x i32>*
+  store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader: ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %middle.block
+  ret void
+
+for.body: ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+  %11 = load i32, i32* %arrayidx, align 4
+  %xor = xor i32 %11, %x
+  store i32 %xor, i32* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+define void @sink_splat_fmul(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_fmul:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a1, 1024
+; CHECK-NEXT: .LBB10_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a1, a1, -4
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bnez a1, .LBB10_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
+  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds float, float* %a, i64 %index
+  %1 = bitcast float* %0 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %1, align 4
+  %2 = fmul <4 x float> %broadcast.splat, %wide.load
+  %3 = bitcast float* %0 to <4 x float>*
+  store <4 x float> %2, <4 x float>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_fadd(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_fadd:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li a1, 1024
+; CHECK-NEXT: .LBB11_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a1, a1, -4
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bnez a1, .LBB11_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
+  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds float, float* %a, i64 %index
+  %1 = bitcast float* %0 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %1, align 4
+  %2 = fadd <4 x float> %broadcast.splat, %wide.load
+  %3 = bitcast float* %0 to <4 x float>*
+  store <4 x float> %2, <4 x float>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_fmul_scalable(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_fmul_scalable:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 2
+; CHECK-NEXT: li a3, 1024
+; CHECK-NEXT: bgeu a3, a2, .LBB12_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a3, 0
+; CHECK-NEXT: j .LBB12_5
+; CHECK-NEXT: .LBB12_2: # %vector.ph
+; CHECK-NEXT: li a5, 0
+; CHECK-NEXT: remu a4, a3, a2
+; CHECK-NEXT: sub a3, a3, a4
+; CHECK-NEXT: mv a6, a0
+; CHECK-NEXT: .LBB12_3: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vl1re32.v v8, (a6)
+; CHECK-NEXT: vsetvli a7, zero, e32, m1, ta, mu
+; CHECK-NEXT: vfmul.vf v8, v8, fa0
+; CHECK-NEXT: vs1r.v v8, (a6)
+; CHECK-NEXT: add a5, a5, a2
+; CHECK-NEXT: add a6, a6, a1
+; CHECK-NEXT: bne a5, a3, .LBB12_3
+; CHECK-NEXT: # %bb.4: # %middle.block
+; CHECK-NEXT: beqz a4, .LBB12_7
+; CHECK-NEXT: .LBB12_5: # %for.body.preheader
+; CHECK-NEXT: addi a1, a3, -1024
+; CHECK-NEXT: slli a2, a3, 2
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: .LBB12_6: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: flw ft0, 0(a0)
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: fmul.s ft0, ft0, fa0
+; CHECK-NEXT: fsw ft0, 0(a0)
+; CHECK-NEXT: addi a1, a1, 1
+; CHECK-NEXT: addi a0, a0, 4
+; CHECK-NEXT: bgeu a1, a2, .LBB12_6
+; CHECK-NEXT: .LBB12_7: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 1
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph: ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 1
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 1
+  br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds float, float* %a, i64 %index
+  %7 = bitcast float* %6 to <vscale x 2 x float>*
+  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
+  %8 = fmul <vscale x 2 x float> %broadcast.splat, %wide.load
+  %9 = bitcast float* %6 to <vscale x 2 x float>*
+  store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader: ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %middle.block
+  ret void
+
+for.body: ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
+  %11 = load float, float* %arrayidx, align 4
+  %mul = fmul float %11, %x
+  store float %mul, float* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+define void @sink_splat_fadd_scalable(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_fadd_scalable:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 2
+; CHECK-NEXT: li a3, 1024
+; CHECK-NEXT: bgeu a3, a2, .LBB13_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a3, 0
+; CHECK-NEXT: j .LBB13_5
+; CHECK-NEXT: .LBB13_2: # %vector.ph
+; CHECK-NEXT: li a5, 0
+; CHECK-NEXT: remu a4, a3, a2
+; CHECK-NEXT: sub a3, a3, a4
+; CHECK-NEXT: mv a6, a0
+; CHECK-NEXT: .LBB13_3: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vl1re32.v v8, (a6)
+; CHECK-NEXT: vsetvli a7, zero, e32, m1, ta, mu
+; CHECK-NEXT: vfadd.vf v8, v8, fa0
+; CHECK-NEXT: vs1r.v v8, (a6)
+; CHECK-NEXT: add a5, a5, a2
+; CHECK-NEXT: add a6, a6, a1
+; CHECK-NEXT: bne a5, a3, .LBB13_3
+; CHECK-NEXT: # %bb.4: # %middle.block
+; CHECK-NEXT: beqz a4, .LBB13_7
+; CHECK-NEXT: .LBB13_5: # %for.body.preheader
+; CHECK-NEXT: addi a1, a3, -1024
+; CHECK-NEXT: slli a2, a3, 2
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: .LBB13_6: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: flw ft0, 0(a0)
+; CHECK-NEXT: mv a2, a1
+; CHECK-NEXT: fadd.s ft0, ft0, fa0
+; CHECK-NEXT: fsw ft0, 0(a0)
+; CHECK-NEXT: addi a1, a1, 1
+; CHECK-NEXT: addi a0, a0, 4
+; CHECK-NEXT: bgeu a1, a2, .LBB13_6
+; CHECK-NEXT: .LBB13_7: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 1
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph: ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 1
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 1
+  br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds float, float* %a, i64 %index
+  %7 = bitcast float* %6 to <vscale x 2 x float>*
+  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
+  %8 = fadd <vscale x 2 x float> %broadcast.splat, %wide.load
+  %9 = bitcast float* %6 to <vscale x 2 x float>*
+  store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader: ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %middle.block
+  ret void
+
+for.body: ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
+  %11 = load float, float* %arrayidx, align 4
+  %mul = fadd float %11, %x
+  store float %mul, float* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+declare i64 @llvm.vscale.i64()
Index: llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
===================================================================
--- llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -3031,19 +3031,18 @@
 define void @sink_splat_vp_add_commute(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: sink_splat_vp_add_commute:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
-; CHECK-NEXT: vmv.v.x v8, a1
-; CHECK-NEXT: li a1, 1024
+; CHECK-NEXT: li a3, 1024
 ; CHECK-NEXT: .LBB48_1: # %vector.body
 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vle32.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
-; CHECK-NEXT: vadd.vv v9, v8, v9, v0.t
+; CHECK-NEXT: vadd.vx v8, v8, a1, v0.t
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
-; CHECK-NEXT: vse32.v v9, (a0)
-; CHECK-NEXT: addi a1, a1, -4
+; CHECK-NEXT: vse32.v v8, (a0)
+; CHECK-NEXT: addi a3, a3, -4
 ; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bnez a1, .LBB48_1
+; CHECK-NEXT: bnez a3, .LBB48_1
 ; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
 ; CHECK-NEXT: ret
 entry: