diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -557,7 +557,7 @@
 Node->getConstantOperandVal(Offset + 1) & 0x7);
 unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
-                                          /*MaskAgnostic*/ false);
+                                          /*MaskAgnostic*/ true);
 SDValue VTypeIOp = CurDAG->getTargetConstant(VTypeI, DL, XLenVT);
 SmallVector VTs = {XLenVT};
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-vslide1down-rv32.ll
@@ -13,7 +13,7 @@
 define @intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_tumu_vx_nxv1i64_nxv1i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, ma
 ; CHECK-NEXT: slli a3, a3, 1
 ; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma
 ; CHECK-NEXT: vslide1down.vx v9, v9, a0
@@ -35,7 +35,7 @@
 define @intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_tamu_vx_nxv1i64_nxv1i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, ma
 ; CHECK-NEXT: slli a3, a3, 1
 ; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma
 ; CHECK-NEXT: vslide1down.vx v9, v9, a0
@@ -59,7 +59,7 @@
 define @intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_tuma_vx_nxv1i64_nxv1i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, ma
 ; CHECK-NEXT: slli a3, a3, 1
 ; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma
 ; CHECK-NEXT: vslide1down.vx v9, v9, a0
@@ -82,7 +82,7 @@
 define @intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, %2, i32 %3) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_tama_vx_nxv1i64_nxv1i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, ma
 ; CHECK-NEXT: slli a2, a2, 1
 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT: vslide1down.vx v8, v8, a0
@@ -102,7 +102,7 @@
 define @intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_tama_undef_mask_vx_nxv1i64_nxv1i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, ma
 ; CHECK-NEXT: slli a2, a2, 1
 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT: vslide1down.vx v8, v8, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-out-arguments.ll
@@ -74,7 +74,7 @@
 ; CHECK-NEXT: sw zero, -36(s0)
 ; CHECK-NEXT: sd zero, -48(s0)
 ; CHECK-NEXT: sd zero, -56(s0)
-; CHECK-NEXT: vsetivli a0, 4, e32, m8, ta, mu
+; CHECK-NEXT: vsetivli a0, 4, e32, m8, ta, ma
 ; CHECK-NEXT: sd a0, -64(s0)
 ; CHECK-NEXT: ld a0, -64(s0)
 ; CHECK-NEXT: addi a1, s0, -56
diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
--- a/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-tu.ll
@@ -705,7 +705,7 @@
 define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind {
 ; RV32-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
 ; RV32: # %bb.0: # %entry
-; RV32-NEXT: vsetvli a2, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli a2, a2, e64, m1, ta, ma
 ; RV32-NEXT: slli a2, a2, 1
 ; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, ma
 ; RV32-NEXT: vmv1r.v v10, v8
@@ -737,7 +737,7 @@
 define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, iXLen %3) nounwind {
 ; RV32-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
 ; RV32: # %bb.0: # %entry
-; RV32-NEXT: vsetvli a2, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli a2, a2, e64, m1, ta, ma
 ; RV32-NEXT: slli a2, a2, 1
 ; RV32-NEXT: vsetvli zero, a2, e32, m1, tu, ma
 ; RV32-NEXT: vmv1r.v v10, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvl-ext.ll
@@ -7,7 +7,7 @@
 define signext i32 @vsetvl_sext() {
 ; CHECK-LABEL: vsetvl_sext:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli a0, 1, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli a0, 1, e16, m2, ta, ma
 ; CHECK-NEXT: ret
 %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 1, i64 1)
 %b = trunc i64 %a to i32
@@ -17,7 +17,7 @@
 define zeroext i32 @vsetvl_zext() {
 ; CHECK-LABEL: vsetvl_zext:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli a0, 1, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli a0, 1, e16, m2, ta, ma
 ; CHECK-NEXT: ret
 %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 1, i64 1)
 %b = trunc i64 %a to i32
@@ -27,7 +27,7 @@
 define i64 @vsetvl_and17bits() {
 ; CHECK-LABEL: vsetvl_and17bits:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli a0, 1, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli a0, 1, e16, m2, ta, ma
 ; CHECK-NEXT: ret
 %a = call i64 @llvm.riscv.vsetvli(i64 1, i64 1, i64 1)
 %b = and i64 %a, 131071
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -23,7 +23,7 @@
 define @test1(i64 %avl, i8 zeroext %cond, %a, %b) nounwind {
 ; CHECK-LABEL: test1:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: beqz a1, .LBB0_2
 ; CHECK-NEXT: # %bb.1: # %if.then
 ; CHECK-NEXT: vfadd.vv v8, v8, v9
@@ -54,7 +54,7 @@
 define @test2(i64 %avl, i8 zeroext %cond, %a, %b) nounwind {
 ; CHECK-LABEL: test2:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: beqz a1, .LBB1_2
 ; CHECK-NEXT: # %bb.1: # %if.then
 ; CHECK-NEXT: vfadd.vv v9, v8, v9
@@ -88,14 +88,13 @@
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: beqz a1, .LBB2_2
 ; CHECK-NEXT: # %bb.1: # %if.then
-; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfadd.vv v9, v8, v9
-; CHECK-NEXT: j .LBB2_3
+; CHECK-NEXT: vfmul.vv v8, v9, v8
+; CHECK-NEXT: ret
 ; CHECK-NEXT: .LBB2_2: # %if.else
-; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfsub.vv v9, v8, v9
-; CHECK-NEXT: .LBB2_3: # %if.end
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfmul.vv v8, v9, v8
 ; CHECK-NEXT: ret
 entry:
@@ -179,7 +178,7 @@
 ; CHECK-LABEL: test5:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: bnez a2, .LBB4_3
 ; CHECK-NEXT: # %bb.1: # %if.else
 ; CHECK-NEXT: vfsub.vv v9, v8, v9
@@ -236,14 +235,14 @@
 ; CHECK-LABEL: test6:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: andi a3, a1, 1
-; CHECK-NEXT: vsetvli a2, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a2, a0, e64, m1, ta, ma
 ; CHECK-NEXT: bnez a3, .LBB5_3
 ; CHECK-NEXT: # %bb.1: # %if.else
 ; CHECK-NEXT: vfsub.vv v8, v8, v9
 ; CHECK-NEXT: andi a1, a1, 2
 ; CHECK-NEXT: beqz a1, .LBB5_4
 ; CHECK-NEXT: .LBB5_2: # %if.then4
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: lui a0, %hi(.LCPI5_0)
 ; CHECK-NEXT: addi a0, a0, %lo(.LCPI5_0)
 ; CHECK-NEXT: vlse64.v v9, (a0), zero
@@ -260,7 +259,7 @@
 ; CHECK-NEXT: andi a1, a1, 2
 ; CHECK-NEXT: bnez a1, .LBB5_2
 ; CHECK-NEXT: .LBB5_4: # %if.else5
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
 ; CHECK-NEXT: lui a0, 260096
 ; CHECK-NEXT: vmv.v.x v9, a0
 ; CHECK-NEXT: lui a0, 262144
@@ -330,7 +329,7 @@
 ; CHECK-NEXT: csrr a2, vlenb
 ; CHECK-NEXT: slli a2, a2, 1
 ; CHECK-NEXT: sub sp, sp, a2
-; CHECK-NEXT: vsetvli s0, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli s0, a0, e64, m1, ta, ma
 ; CHECK-NEXT: beqz a1, .LBB6_2
 ; CHECK-NEXT: # %bb.1: # %if.then
 ; CHECK-NEXT: vfadd.vv v8, v8, v9
@@ -389,7 +388,7 @@
 ; CHECK-NEXT: csrr a2, vlenb
 ; CHECK-NEXT: slli a2, a2, 1
 ; CHECK-NEXT: sub sp, sp, a2
-; CHECK-NEXT: vsetvli s0, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli s0, a0, e64, m1, ta, ma
 ; CHECK-NEXT: beqz a1, .LBB7_2
 ; CHECK-NEXT: # %bb.1: # %if.then
 ; CHECK-NEXT: vfadd.vv v9, v8, v9
@@ -442,11 +441,10 @@
 define void @saxpy_vec(i64 %n, float %a, float* nocapture readonly %x, float* nocapture %y) {
 ; CHECK-LABEL: saxpy_vec:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a3, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli a3, a0, e32, m8, ta, ma
 ; CHECK-NEXT: beqz a3, .LBB8_2
 ; CHECK-NEXT: .LBB8_1: # %for.body
 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
 ; CHECK-NEXT: vle32.v v8, (a1)
 ; CHECK-NEXT: vle32.v v16, (a2)
 ; CHECK-NEXT: slli a4, a3, 2
@@ -455,7 +453,7 @@
 ; CHECK-NEXT: vfmacc.vf v16, fa0, v8
 ; CHECK-NEXT: vse32.v v16, (a2)
 ; CHECK-NEXT: sub a0, a0, a3
-; CHECK-NEXT: vsetvli a3, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli a3, a0, e32, m8, ta, ma
 ; CHECK-NEXT: add a2, a2, a4
 ; CHECK-NEXT: bnez a3, .LBB8_1
 ; CHECK-NEXT: .LBB8_2: # %for.end
@@ -585,14 +583,13 @@
 define void @vlmax(i64 %N, double* %c, double* %a, double* %b) {
 ; CHECK-LABEL: vlmax:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a6, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a6, zero, e64, m1, ta, ma
 ; CHECK-NEXT: blez a0, .LBB11_3
 ; CHECK-NEXT: # %bb.1: # %for.body.preheader
 ; CHECK-NEXT: li a5, 0
 ; CHECK-NEXT: slli a4, a6, 3
 ; CHECK-NEXT: .LBB11_2: # %for.body
 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vle64.v v8, (a2)
 ; CHECK-NEXT: vle64.v v9, (a3)
 ; CHECK-NEXT: vfadd.vv v8, v8, v9
@@ -633,7 +630,7 @@
 define void @vector_init_vlmax(i64 %N, double* %c) {
 ; CHECK-LABEL: vector_init_vlmax:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma
 ; CHECK-NEXT: blez a0, .LBB12_3
 ; CHECK-NEXT: # %bb.1: # %for.body.preheader
 ; CHECK-NEXT: li a3, 0
@@ -641,7 +638,6 @@
 ; CHECK-NEXT: vmv.v.i v8, 0
 ; CHECK-NEXT: .LBB12_2: # %for.body
 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vse64.v v8, (a1)
 ; CHECK-NEXT: add a3, a3, a2
 ; CHECK-NEXT: add a1, a1, a4
@@ -670,7 +666,7 @@
 define void @vector_init_vsetvli_N(i64 %N, double* %c) {
 ; CHECK-LABEL: vector_init_vsetvli_N:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a2, a0, e64, m1, ta, ma
 ; CHECK-NEXT: blez a0, .LBB13_3
 ; CHECK-NEXT: # %bb.1: # %for.body.preheader
 ; CHECK-NEXT: li a3, 0
@@ -709,7 +705,7 @@
 ; CHECK-LABEL: vector_init_vsetvli_fv:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: li a2, 0
-; CHECK-NEXT: vsetivli a3, 4, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli a3, 4, e64, m1, ta, ma
 ; CHECK-NEXT: slli a4, a3, 3
 ; CHECK-NEXT: vsetvli a5, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vmv.v.i v8, 0
@@ -835,7 +831,7 @@
 ; CHECK-LABEL: pre_lmul:
 ; CHECK: # %bb.0: # %entry
 ; CHECK-NEXT: andi a0, a0, 1
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vadd.vv v8, v8, v9
 ; CHECK-NEXT: vadd.vv v8, v8, v9
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -18,7 +18,7 @@
 define @test1(i64 %avl, %a, %b) nounwind {
 ; CHECK-LABEL: test1:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v9
 ; CHECK-NEXT: ret
@@ -51,6 +51,7 @@
 define @test3(i64 %avl, %a, * %b, %c) nounwind {
 ; CHECK-LABEL: test3:
 ; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT: vle64.v v8, (a1), v0.t
 ; CHECK-NEXT: ret
@@ -86,7 +87,7 @@
 define @test5( %0, %1, %2, i64 %avl) nounwind {
 ; CHECK-LABEL: test5:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
 ; CHECK-NEXT: vmseq.vv v8, v8, v9
 ; CHECK-NEXT: vmand.mm v0, v8, v0
 ; CHECK-NEXT: ret
@@ -103,23 +104,22 @@
 define void @test6(i32* nocapture readonly %A, i32* nocapture %B, i64 %n) {
 ; CHECK-LABEL: test6:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a3, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli a3, a2, e32, m1, ta, ma
 ; CHECK-NEXT: beqz a3, .LBB5_3
 ; CHECK-NEXT: # %bb.1: # %for.body.preheader
 ; CHECK-NEXT: li a4, 0
 ; CHECK-NEXT: .LBB5_2: # %for.body
 ; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: slli a5, a4, 2
-; CHECK-NEXT: add a6, a0, a5
-; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma
-; CHECK-NEXT: vle32.v v8, (a6)
+; CHECK-NEXT: slli a6, a4, 2
+; CHECK-NEXT: add a5, a0, a6
+; CHECK-NEXT: vle32.v v8, (a5)
 ; CHECK-NEXT: vmsle.vi v9, v8, -3
 ; CHECK-NEXT: vmsgt.vi v10, v8, 2
 ; CHECK-NEXT: vmor.mm v0, v9, v10
-; CHECK-NEXT: add a5, a5, a1
-; CHECK-NEXT: vse32.v v8, (a5), v0.t
+; CHECK-NEXT: add a6, a6, a1
+; CHECK-NEXT: vse32.v v8, (a6), v0.t
 ; CHECK-NEXT: add a4, a4, a3
-; CHECK-NEXT: vsetvli a3, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli a3, a2, e32, m1, ta, ma
 ; CHECK-NEXT: bnez a3, .LBB5_2
 ; CHECK-NEXT: .LBB5_3: # %for.cond.cleanup
 ; CHECK-NEXT: ret
@@ -259,7 +259,7 @@
 define @test14(i64 %avl, %a, %b) nounwind {
 ; CHECK-LABEL: test14:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli a0, a0, e32, mf2, ta, ma
 ; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
 ; CHECK-NEXT: vfadd.vv v8, v8, v9
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
@@ -308,7 +308,7 @@
 define @test16(i64 %avl, double %a, %b) nounwind {
 ; CHECK-LABEL: test16:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a0, a0, e64, mf2, ta, mu
+; CHECK-NEXT: vsetvli a0, a0, e64, mf2, ta, ma
 ; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
 ; CHECK-NEXT: vfmv.v.f v9, fa0
 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
@@ -399,7 +399,7 @@
 define i64 @avl_forward1( %v, * %p) nounwind {
 ; CHECK-LABEL: avl_forward1:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli a1, 6, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli a1, 6, e32, m1, ta, ma
 ; CHECK-NEXT: vse32.v v8, (a0)
 ; CHECK-NEXT: mv a0, a1
 ; CHECK-NEXT: ret
@@ -413,7 +413,7 @@
 define i64 @avl_forward1b_neg( %v, * %p) nounwind {
 ; CHECK-LABEL: avl_forward1b_neg:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli a1, 6, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli a1, 6, e16, m1, ta, ma
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vse32.v v8, (a0)
 ; CHECK-NEXT: mv a0, a1
@@ -427,7 +427,7 @@
 define i64 @avl_forward2( %v, * %p) nounwind {
 ; CHECK-LABEL: avl_forward2:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vse32.v v8, (a0)
 ; CHECK-NEXT: mv a0, a1
 ; CHECK-NEXT: ret
@@ -442,7 +442,7 @@
 define void @avl_forward3( %v, * %p, i64 %reg) nounwind {
 ; CHECK-LABEL: avl_forward3:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vse32.v v8, (a0)
 ; CHECK-NEXT: ret
 entry:
@@ -455,7 +455,7 @@
 define i64 @avl_forward3b( %v, * %p, i64 %reg) nounwind {
 ; CHECK-LABEL: avl_forward3b:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli a1, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vse32.v v8, (a0)
 ; CHECK-NEXT: mv a0, a1
 ; CHECK-NEXT: ret
@@ -469,7 +469,7 @@
 define void @avl_forward4( %v, * %p, i64 %reg) nounwind {
 ; CHECK-LABEL: avl_forward4:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli a1, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vse32.v v8, (a0)
 ; CHECK-NEXT: ret
@@ -483,7 +483,7 @@
 define i64 @avl_forward4b( %v, * %p, i64 %reg) nounwind {
 ; CHECK-LABEL: avl_forward4b:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli a1, a1, e16, m1, ta, ma
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vse32.v v8, (a0)
 ; CHECK-NEXT: mv a0, a1
@@ -499,7 +499,7 @@
 define @vleNff(i64* %str, i64 %n, i64 %x) {
 ; CHECK-LABEL: vleNff:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli a1, a1, e8, m4, ta, ma
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT: vle64ff.v v8, (a0)
 ; CHECK-NEXT: vsetvli zero, zero, e64, m1, tu, ma
@@ -520,7 +520,7 @@
 define @vleNff2(i64* %str, i64 %n, i64 %x) {
 ; CHECK-LABEL: vleNff2:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a1, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli a1, a1, e8, m4, ta, ma
 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT: vle64ff.v v8, (a0)
 ; CHECK-NEXT: vadd.vx v8, v8, a2
@@ -546,7 +546,7 @@
 ; CHECK-LABEL: avl_forward5:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: vsetvli a1, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli a1, a1, e8, m4, ta, ma
 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT: vle32.v v8, (a0)
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-intrinsics.ll
@@ -10,7 +10,7 @@
 define iXLen @test_vsetvli_e8m1(iXLen %avl) nounwind {
 ; CHECK-LABEL: test_vsetvli_e8m1:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli a0, a0, e8, m1, ta, ma
 ; CHECK-NEXT: ret
 %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 0, iXLen 0)
 ret iXLen %vl
@@ -19,7 +19,7 @@
 define iXLen @test_vsetvli_e16mf4(iXLen %avl) nounwind {
 ; CHECK-LABEL: test_vsetvli_e16mf4:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli a0, a0, e16, mf4, ta, ma
 ; CHECK-NEXT: ret
 %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 1, iXLen 6)
 ret iXLen %vl
@@ -28,7 +28,7 @@
 define iXLen @test_vsetvli_e64mf8(iXLen %avl) nounwind {
 ; CHECK-LABEL: test_vsetvli_e64mf8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, a0, e64, mf8, ta, mu
+; CHECK-NEXT: vsetvli a0, a0, e64, mf8, ta, ma
 ; CHECK-NEXT: ret
 %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 3, iXLen 5)
 ret iXLen %vl
@@ -37,7 +37,7 @@
 define iXLen @test_vsetvli_e8mf2_zero_avl() nounwind {
 ; CHECK-LABEL: test_vsetvli_e8mf2_zero_avl:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli a0, 0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli a0, 0, e8, mf2, ta, ma
 ; CHECK-NEXT: ret
 %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 0, iXLen 7)
 ret iXLen %vl
@@ -46,7 +46,7 @@
 define iXLen @test_vsetvli_e32mf8_zero_avl() nounwind {
 ; CHECK-LABEL: test_vsetvli_e32mf8_zero_avl:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli a0, 0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli a0, 0, e16, mf4, ta, ma
 ; CHECK-NEXT: ret
 %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen 0, iXLen 1, iXLen 6)
 ret iXLen %vl
@@ -55,7 +55,7 @@
 define iXLen @test_vsetvlimax_e32m2() nounwind {
 ; CHECK-LABEL: test_vsetvlimax_e32m2:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT: ret
 %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 2, iXLen 1)
 ret iXLen %vl
@@ -64,7 +64,7 @@
 define iXLen @test_vsetvlimax_e64m4() nounwind {
 ; CHECK-LABEL: test_vsetvlimax_e64m4:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT: ret
 %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 2)
 ret iXLen %vl
@@ -73,7 +73,7 @@
 define iXLen @test_vsetvlimax_e64m8() nounwind {
 ; CHECK-LABEL: test_vsetvlimax_e64m8:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT: ret
 %vl = call iXLen @llvm.riscv.vsetvlimax.iXLen(iXLen 3, iXLen 3)
 ret iXLen %vl
@@ -102,7 +102,7 @@
 define @redundant_vsetvli(iXLen %avl, * %ptr) nounwind {
 ; CHECK-LABEL: redundant_vsetvli:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vle32.v v8, (a1)
 ; CHECK-NEXT: ret
 %vl = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 2, iXLen 1)
@@ -117,8 +117,8 @@
 define @repeated_vsetvli(iXLen %avl, * %ptr) nounwind {
 ; CHECK-LABEL: repeated_vsetvli:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, a0, e32, m2, ta, mu
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli a0, a0, e32, m2, ta, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT: vle32.v v8, (a1)
 ; CHECK-NEXT: ret
 %vl0 = call iXLen @llvm.riscv.vsetvli.iXLen(iXLen %avl, iXLen 2, iXLen 1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll
@@ -51,7 +51,7 @@
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT: ret
@@ -68,7 +68,7 @@
 define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3( %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3:
 ; CHECK-128-65536: # %bb.0: # %entry
-; CHECK-128-65536-NEXT: vsetivli a2, 3, e64, m1, ta, mu
+; CHECK-128-65536-NEXT: vsetivli a2, 3, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT: slli a2, a2, 1
 ; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0
@@ -84,7 +84,7 @@
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT: ret
@@ -101,7 +101,7 @@
 define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8( %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8:
 ; CHECK-128-65536: # %bb.0: # %entry
-; CHECK-128-65536-NEXT: vsetivli a2, 8, e64, m1, ta, mu
+; CHECK-128-65536-NEXT: vsetivli a2, 8, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT: slli a2, a2, 1
 ; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0
@@ -117,7 +117,7 @@
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT: ret
@@ -134,7 +134,7 @@
 define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9( %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9:
 ; CHECK-128-65536: # %bb.0: # %entry
-; CHECK-128-65536-NEXT: vsetivli a2, 9, e64, m1, ta, mu
+; CHECK-128-65536-NEXT: vsetivli a2, 9, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT: slli a2, a2, 1
 ; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0
@@ -143,7 +143,7 @@
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9:
 ; CHECK-512: # %bb.0: # %entry
-; CHECK-512-NEXT: vsetivli a2, 9, e64, m1, ta, mu
+; CHECK-512-NEXT: vsetivli a2, 9, e64, m1, ta, ma
 ; CHECK-512-NEXT: slli a2, a2, 1
 ; CHECK-512-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
@@ -152,7 +152,7 @@
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT: ret
@@ -169,7 +169,7 @@
 define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15( %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15:
 ; CHECK-128-65536: # %bb.0: # %entry
-; CHECK-128-65536-NEXT: vsetivli a2, 15, e64, m1, ta, mu
+; CHECK-128-65536-NEXT: vsetivli a2, 15, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT: slli a2, a2, 1
 ; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0
@@ -178,7 +178,7 @@
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15:
 ; CHECK-512: # %bb.0: # %entry
-; CHECK-512-NEXT: vsetivli a2, 15, e64, m1, ta, mu
+; CHECK-512-NEXT: vsetivli a2, 15, e64, m1, ta, ma
 ; CHECK-512-NEXT: slli a2, a2, 1
 ; CHECK-512-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
@@ -187,7 +187,7 @@
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT: ret
@@ -204,7 +204,7 @@
 define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16( %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16:
 ; CHECK-128-65536: # %bb.0: # %entry
-; CHECK-128-65536-NEXT: vsetivli a2, 16, e64, m1, ta, mu
+; CHECK-128-65536-NEXT: vsetivli a2, 16, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT: slli a2, a2, 1
 ; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0
@@ -213,14 +213,14 @@
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16:
 ; CHECK-512: # %bb.0: # %entry
-; CHECK-512-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-512-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-512-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-512-NEXT: ret
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT: ret
@@ -238,7 +238,7 @@
 ; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047:
 ; CHECK-128-65536: # %bb.0: # %entry
 ; CHECK-128-65536-NEXT: li a2, 2047
-; CHECK-128-65536-NEXT: vsetvli a2, a2, e64, m1, ta, mu
+; CHECK-128-65536-NEXT: vsetvli a2, a2, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT: slli a2, a2, 1
 ; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT: vslide1down.vx v8, v8, a0
@@ -247,14 +247,14 @@
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047:
 ; CHECK-512: # %bb.0: # %entry
-; CHECK-512-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-512-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-512-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-512-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-512-NEXT: ret
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT: ret
@@ -271,7 +271,7 @@
 define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048( %0, i64 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vslide1down.vx v8, v8, a0
 ; CHECK-NEXT: vslide1down.vx v8, v8, a1
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-rv32.ll
@@ -856,7 +856,7 @@
 define @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, ma
 ; CHECK-NEXT: slli a2, a2, 1
 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT: vslide1down.vx v8, v8, a0
@@ -883,7 +883,7 @@
 define @intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv1i64_nxv1i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, ma
 ; CHECK-NEXT: slli a3, a3, 1
 ; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma
 ; CHECK-NEXT: vslide1down.vx v9, v9, a0
@@ -911,7 +911,7 @@
 define @intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv2i64_nxv2i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a2, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli a2, a2, e64, m2, ta, ma
 ; CHECK-NEXT: slli a2, a2, 1
 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT: vslide1down.vx v8, v8, a0
@@ -938,7 +938,7 @@
 define @intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv2i64_nxv2i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a3, a2, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli a3, a2, e64, m2, ta, ma
 ; CHECK-NEXT: slli a3, a3, 1
 ; CHECK-NEXT: vsetvli zero, a3, e32, m2, ta, ma
 ; CHECK-NEXT: vslide1down.vx v10, v10, a0
@@ -966,7 +966,7 @@
 define @intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv4i64_nxv4i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a2, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli a2, a2, e64, m4, ta, ma
 ; CHECK-NEXT: slli a2, a2, 1
 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT: vslide1down.vx v8, v8, a0
@@ -993,7 +993,7 @@
 define @intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv4i64_nxv4i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a3, a2, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli a3, a2, e64, m4, ta, ma
 ; CHECK-NEXT: slli a3, a3, 1
 ; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, ma
 ; CHECK-NEXT: vslide1down.vx v12, v12, a0
@@ -1021,7 +1021,7 @@
 define @intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_vx_nxv8i64_nxv8i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli a2, a2, e64, m8, ta, ma
 ; CHECK-NEXT: slli a2, a2, 1
 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT: vslide1down.vx v8, v8, a0
@@ -1048,7 +1048,7 @@
 define @intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1down_mask_vx_nxv8i64_nxv8i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a3, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli a3, a2, e64, m8, ta, ma
 ; CHECK-NEXT: slli a3, a3, 1
 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
 ; CHECK-NEXT: vslide1down.vx v16, v16, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll
@@ -51,7 +51,7 @@
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1up.vx v9, v8, a1
 ; CHECK-64-NEXT: vslide1up.vx v8, v9, a0
 ; CHECK-64-NEXT: ret
@@ -68,7 +68,7 @@
 define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl3( %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl3:
 ; CHECK-128-65536: # %bb.0: # %entry
-; CHECK-128-65536-NEXT: vsetivli a2, 3, e64, m1, ta, mu
+; CHECK-128-65536-NEXT: vsetivli a2, 3, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT: slli a2, a2, 1
 ; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT: vslide1up.vx v9, v8, a1
@@ -84,7 +84,7 @@
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl3:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1up.vx v9, v8, a1
 ; CHECK-64-NEXT: vslide1up.vx v8, v9, a0
 ; CHECK-64-NEXT: ret
@@ -101,7 +101,7 @@
 define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl8( %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl8:
 ; CHECK-128-65536: # %bb.0: # %entry
-; CHECK-128-65536-NEXT: vsetivli a2, 8, e64, m1, ta, mu
+; CHECK-128-65536-NEXT: vsetivli a2, 8, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT: slli a2, a2, 1
 ; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT: vslide1up.vx v9, v8, a1
@@ -117,7 +117,7 @@
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl8:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1up.vx v9, v8, a1
 ; CHECK-64-NEXT: vslide1up.vx v8, v9, a0
 ; CHECK-64-NEXT: ret
@@ -134,7 +134,7 @@
 define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl9( %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl9:
 ; CHECK-128-65536: # %bb.0: # %entry
-; CHECK-128-65536-NEXT: vsetivli a2, 9, e64, m1, ta, mu
+; CHECK-128-65536-NEXT: vsetivli a2, 9, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT: slli a2, a2, 1
 ; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT: vslide1up.vx v9, v8, a1
@@ -143,7 +143,7 @@
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl9:
 ; CHECK-512: # %bb.0: # %entry
-; CHECK-512-NEXT: vsetivli a2, 9, e64, m1, ta, mu
+; CHECK-512-NEXT: vsetivli a2, 9, e64, m1, ta, ma
 ; CHECK-512-NEXT: slli a2, a2, 1
 ; CHECK-512-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-512-NEXT: vslide1up.vx v9, v8, a1
@@ -152,7 +152,7 @@
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl9:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1up.vx v9, v8, a1
 ; CHECK-64-NEXT: vslide1up.vx v8, v9, a0
 ; CHECK-64-NEXT: ret
@@ -169,7 +169,7 @@
 define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl15( %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl15:
 ; CHECK-128-65536: # %bb.0: # %entry
-; CHECK-128-65536-NEXT: vsetivli a2, 15, e64, m1, ta, mu
+; CHECK-128-65536-NEXT: vsetivli a2, 15, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT: slli a2, a2, 1
 ; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT: vslide1up.vx v9, v8, a1
@@ -178,7 +178,7 @@
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl15:
 ; CHECK-512: # %bb.0: # %entry
-; CHECK-512-NEXT: vsetivli a2, 15, e64, m1, ta, mu
+; CHECK-512-NEXT: vsetivli a2, 15, e64, m1, ta, ma
 ; CHECK-512-NEXT: slli a2, a2, 1
 ; CHECK-512-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-512-NEXT: vslide1up.vx v9, v8, a1
@@ -187,7 +187,7 @@
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl15:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1up.vx v9, v8, a1
 ; CHECK-64-NEXT: vslide1up.vx v8, v9, a0
 ; CHECK-64-NEXT: ret
@@ -204,7 +204,7 @@
 define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl16( %0, i64 %1) nounwind {
 ; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl16:
 ; CHECK-128-65536: # %bb.0: # %entry
-; CHECK-128-65536-NEXT: vsetivli a2, 16, e64, m1, ta, mu
+; CHECK-128-65536-NEXT: vsetivli a2, 16, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT: slli a2, a2, 1
 ; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT: vslide1up.vx v9, v8, a1
@@ -213,14 +213,14 @@
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl16:
 ; CHECK-512: # %bb.0: # %entry
-; CHECK-512-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-512-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-512-NEXT: vslide1up.vx v9, v8, a1
 ; CHECK-512-NEXT: vslide1up.vx v8, v9, a0
 ; CHECK-512-NEXT: ret
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl16:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1up.vx v9, v8, a1
 ; CHECK-64-NEXT: vslide1up.vx v8, v9, a0
 ; CHECK-64-NEXT: ret
@@ -238,7 +238,7 @@
 ; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2047:
 ; CHECK-128-65536: # %bb.0: # %entry
 ; CHECK-128-65536-NEXT: li a2, 2047
-; CHECK-128-65536-NEXT: vsetvli a2, a2, e64, m1, ta, mu
+; CHECK-128-65536-NEXT: vsetvli a2, a2, e64, m1, ta, ma
 ; CHECK-128-65536-NEXT: slli a2, a2, 1
 ; CHECK-128-65536-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-128-65536-NEXT: vslide1up.vx v9, v8, a1
@@ -247,14 +247,14 @@
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2047:
 ; CHECK-512: # %bb.0: # %entry
-; CHECK-512-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-512-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-512-NEXT: vslide1up.vx v9, v8, a1
 ; CHECK-512-NEXT: vslide1up.vx v8, v9, a0
 ; CHECK-512-NEXT: ret
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2047:
 ; CHECK-64: # %bb.0: # %entry
-; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-64-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-64-NEXT: vslide1up.vx v9, v8, a1
 ; CHECK-64-NEXT: vslide1up.vx v8, v9, a0
 ; CHECK-64-NEXT: ret
@@ -271,7 +271,7 @@
 define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2048( %0, i64 %1) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2048:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
 ; CHECK-NEXT: vslide1up.vx v9, v8, a1
 ; CHECK-NEXT: vslide1up.vx v8, v9, a0
 ; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-rv32.ll
@@ -874,7 +874,7 @@
 define @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a2, a2, e64, m1, ta, ma
 ; CHECK-NEXT: slli a2, a2, 1
 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT: vslide1up.vx v9, v8, a1
@@ -901,7 +901,7 @@
 define @intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i64_nxv1i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a3, a2, e64, m1, ta, ma
 ; CHECK-NEXT: slli a3, a3, 1
 ; CHECK-NEXT: vsetvli zero, a3, e32, m1, ta, ma
 ; CHECK-NEXT: vslide1up.vx v10, v9, a1
@@ -929,7 +929,7 @@
 define @intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv2i64_nxv2i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a2, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli a2, a2, e64, m2, ta, ma
 ; CHECK-NEXT: slli a2, a2, 1
 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT: vslide1up.vx v10, v8, a1
@@ -956,7 +956,7 @@
 define @intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv2i64_nxv2i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a3, a2, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli a3, a2, e64, m2, ta, ma
 ; CHECK-NEXT: slli a3, a3, 1
 ; CHECK-NEXT: vsetvli zero, a3, e32, m2, ta, ma
 ; CHECK-NEXT: vslide1up.vx v12, v10, a1
@@ -984,7 +984,7 @@
 define @intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv4i64_nxv4i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a2, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli a2, a2, e64, m4, ta, ma
 ; CHECK-NEXT: slli a2, a2, 1
 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT: vslide1up.vx v12, v8, a1
@@ -1011,7 +1011,7 @@
 define @intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv4i64_nxv4i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a3, a2, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli a3, a2, e64, m4, ta, ma
 ; CHECK-NEXT: slli a3, a3, 1
 ; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, ma
 ; CHECK-NEXT: vslide1up.vx v16, v12, a1
@@ -1039,7 +1039,7 @@
 define @intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64( %0, i64 %1, i32 %2) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_vx_nxv8i64_nxv8i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a2, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli a2, a2, e64, m8, ta, ma
 ; CHECK-NEXT: slli a2, a2, 1
 ; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
 ; CHECK-NEXT: vslide1up.vx v16, v8, a1
@@ -1066,7 +1066,7 @@
 define @intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv8i64_nxv8i64_i64:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a3, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli a3, a2, e64, m8, ta, ma
 ; CHECK-NEXT: slli a3, a3, 1
 ; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
 ; CHECK-NEXT: vslide1up.vx v24, v16, a1
diff --git a/llvm/test/Transforms/LoopStrengthReduce/RISCV/lsr-drop-solution.ll b/llvm/test/Transforms/LoopStrengthReduce/RISCV/lsr-drop-solution.ll
--- a/llvm/test/Transforms/LoopStrengthReduce/RISCV/lsr-drop-solution.ll
+++ b/llvm/test/Transforms/LoopStrengthReduce/RISCV/lsr-drop-solution.ll
@@ -7,7 +7,7 @@
 define ptr @foo(ptr %a0, ptr %a1, i64 %a2) {
 ; CHECK-LABEL: foo:
 ; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli a4, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli a4, a2, e8, m8, ta, ma
 ; CHECK-NEXT: bne a4, a2, .LBB0_2
 ; CHECK-NEXT: # %bb.1:
 ; CHECK-NEXT: mv a3, a0
@@ -29,7 +29,7 @@
 ; CHECK-NEXT: bltu a3, a5, .LBB0_3
 ; CHECK-NEXT: # %bb.4: # %do.end
 ; CHECK-NEXT: sub a2, a2, a3
-; CHECK-NEXT: vsetvli a2, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli a2, a2, e8, m8, ta, ma
 ; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
 ; CHECK-NEXT: vle8.v v8, (a1)
 ; CHECK-NEXT: vse8.v v8, (a3)