diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -3775,15 +3775,12 @@
 ; CHECK-LABEL: sink_splat_mul_lmul2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 1024
-; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:  .LBB67_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vmul.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    bnez a2, .LBB67_1
@@ -3798,10 +3795,10 @@
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = getelementptr inbounds i64, i64* %a, i64 %index
   %1 = bitcast i64* %0 to <4 x i64>*
-  %wide.load = load <4 x i64>, <4 x i64>* %1, align 4
+  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
   %2 = mul <4 x i64> %wide.load, %broadcast.splat
   %3 = bitcast i64* %0 to <4 x i64>*
-  store <4 x i64> %2, <4 x i64>* %3, align 4
+  store <4 x i64> %2, <4 x i64>* %3, align 8
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body
@@ -3814,15 +3811,12 @@
 ; CHECK-LABEL: sink_splat_add_lmul2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 1024
-; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:  .LBB68_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vadd.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    bnez a2, .LBB68_1
@@ -3837,10 +3831,10 @@
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = getelementptr inbounds i64, i64* %a, i64 %index
   %1 = bitcast i64* %0 to <4 x i64>*
-  %wide.load = load <4 x i64>, <4 x i64>* %1, align 4
+  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
   %2 = add <4 x i64> %wide.load, %broadcast.splat
   %3 = bitcast i64* %0 to <4 x i64>*
-  store <4 x i64> %2, <4 x i64>* %3, align 4
+  store <4 x i64> %2, <4 x i64>* %3, align 8
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body
@@ -3853,15 +3847,12 @@
 ; CHECK-LABEL: sink_splat_sub_lmul2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 1024
-; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:  .LBB69_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vsub.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    bnez a2, .LBB69_1
@@ -3876,10 +3867,10 @@
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = getelementptr inbounds i64, i64* %a, i64 %index
   %1 = bitcast i64* %0 to <4 x i64>*
-  %wide.load = load <4 x i64>, <4 x i64>* %1, align 4
+  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
   %2 = sub <4 x i64> %wide.load, %broadcast.splat
   %3 = bitcast i64* %0 to <4 x i64>*
-  store <4 x i64> %2, <4 x i64>* %3, align 4
+  store <4 x i64> %2, <4 x i64>* %3, align 8
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body
@@ -3892,15 +3883,12 @@
 ; CHECK-LABEL: sink_splat_rsub_lmul2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 1024
-; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:  .LBB70_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vrsub.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    bnez a2, .LBB70_1
@@ -3915,10 +3903,10 @@
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = getelementptr inbounds i64, i64* %a, i64 %index
   %1 = bitcast i64* %0 to <4 x i64>*
-  %wide.load = load <4 x i64>, <4 x i64>* %1, align 4
+  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
   %2 = sub <4 x i64> %broadcast.splat, %wide.load
   %3 = bitcast i64* %0 to <4 x i64>*
-  store <4 x i64> %2, <4 x i64>* %3, align 4
+  store <4 x i64> %2, <4 x i64>* %3, align 8
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body
@@ -3931,15 +3919,12 @@
 ; CHECK-LABEL: sink_splat_and_lmul2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 1024
-; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:  .LBB71_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vand.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    bnez a2, .LBB71_1
@@ -3954,10 +3939,10 @@
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = getelementptr inbounds i64, i64* %a, i64 %index
   %1 = bitcast i64* %0 to <4 x i64>*
-  %wide.load = load <4 x i64>, <4 x i64>* %1, align 4
+  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
   %2 = and <4 x i64> %wide.load, %broadcast.splat
   %3 = bitcast i64* %0 to <4 x i64>*
-  store <4 x i64> %2, <4 x i64>* %3, align 4
+  store <4 x i64> %2, <4 x i64>* %3, align 8
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body
@@ -3970,15 +3955,12 @@
 ; CHECK-LABEL: sink_splat_or_lmul2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 1024
-; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:  .LBB72_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vor.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    bnez a2, .LBB72_1
@@ -3993,10 +3975,10 @@
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = getelementptr inbounds i64, i64* %a, i64 %index
   %1 = bitcast i64* %0 to <4 x i64>*
-  %wide.load = load <4 x i64>, <4 x i64>* %1, align 4
+  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
   %2 = or <4 x i64> %wide.load, %broadcast.splat
   %3 = bitcast i64* %0 to <4 x i64>*
-  store <4 x i64> %2, <4 x i64>* %3, align 4
+  store <4 x i64> %2, <4 x i64>* %3, align 8
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body
@@ -4009,15 +3991,12 @@
 ; CHECK-LABEL: sink_splat_xor_lmul2:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    li a2, 1024
-; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:  .LBB73_1: # %vector.body
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT:    vle64.v v8, (a0)
 ; CHECK-NEXT:    vxor.vx v8, v8, a1
-; CHECK-NEXT:    vsetvli zero, a3, e8, m2, ta, mu
-; CHECK-NEXT:    vse8.v v8, (a0)
+; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    addi a2, a2, -4
 ; CHECK-NEXT:    addi a0, a0, 32
 ; CHECK-NEXT:    bnez a2, .LBB73_1
@@ -4032,10 +4011,10 @@
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = getelementptr inbounds i64, i64* %a, i64 %index
   %1 = bitcast i64* %0 to <4 x i64>*
-  %wide.load = load <4 x i64>, <4 x i64>* %1, align 4
+  %wide.load = load <4 x i64>, <4 x i64>* %1, align 8
   %2 = xor <4 x i64> %wide.load, %broadcast.splat
   %3 = bitcast i64* %0 to <4 x i64>*
-  store <4 x i64> %2, <4 x i64>* %3, align 4
+  store <4 x i64> %2, <4 x i64>* %3, align 8
   %index.next = add nuw i64 %index, 4
   %4 = icmp eq i64 %index.next, 1024
   br i1 %4, label %for.cond.cleanup, label %vector.body